Mirror of https://github.com/moby/moby.git (synced 2026-01-18 01:48:07 +00:00)
Compare commits
290 Commits
Commit SHA1s (only the SHA1 column survived extraction; author, date, and message did not):

010d74ec2f bb76985d39 28ec47c441 d003cfea25 f3103e5c91 ef7e000a13
811341423b b3addb5fb8 00ee6d1925 6f8a79c23c cf8063d152 3e10fe1a15
45ecdf9c8e b942f24ba8 3779291e9b fa14a1b983 f9e14cc838 f2ea539467
7c4e5fbd46 97ef8a067c 4b9e475a3d 1d0aeae339 859856b3e4 b8b18a2b42
561d1db074 1f9abfe841 2b93f18223 8f3b8f3835 fca83b4cfb 444a087ac2
e1c861cf33 6fe3da9924 f43f3fa218 1e551c7cc5 2c395ce8fb 7799ae27ca
bb754fd34d a0298c0bd0 a86a82cb7e 36ab1836f9 5e8912e0e8 4e414f6205
4f31141e13 ee6823d797 1363dfdd1d b308e33106 e5b09523dc d44abae873
323c4b5211 5b4a0cac4e 73294b6d56 d6ca05f7cb dfc2dc4d35 ea762c1a51
599009191a af50b2f17c 2a1181f404 f7afbf34fe 3069bf9460 32b9a429c5
76910d16cc 42c7dc448f 8502ad4ba7 58ec7855bc 949fde88df 5a9f45cb7a
8f4a54734f 9359d79c4f 69db6ea867 3b89187d03 82a47b0e82 e0f07bc186
194eb246ef f560b87a86 c561212b83 81e596e272 acfdfa81be 589515c717
523f726716 7fd6dcc831 848f290012 add97f7eb0 9dd7ae4074 5f55784224
f3816ee024 0b3e153588 2226989410 c23b15b9d8 055b32e3f4 907d9ce13c
74d45789dd 40522c0380 d5bb0ff80a ad80da3389 1f80c2a652 1bc3f6b7b5
643621133f fd240413ff 392b1e99b2 0dfebf2d93 40aaebe56a a1dba16fe8
e31f1f1eba 7e720d0a77 237868e9c3 fc197188d7 d59080d119 c6dcee329d
484a75f354 434cf6c8ca 83d631b6a4 8b82b0dfe7 e93b7b4647 06a818616b
f50b8b08b5 cda146547e a17fd7b294 22162687df d256f3049b a1a4a99d7e
4986958e7e cd735496da 894d4a23fb fc9f4d8bad 1d4b7d8fa1 360078d761
808f2d39bd d1ca12e81b a042c9fb1b 721bb410f6 029625981d 0fccf0f686
efaf2cac5c cb1fe939a8 c654aea4f2 d2d8a4a6c5 4100e9b7df 5875953d9b
3264c1c5eb f4ce106e02 7ec1236cee 2b4bb67ce0 6155f07561 e6e35e5984
0d207abf8e a009d4ae8d b75f385abd 7ce7516c12 f6b91262a7 d16d748132
3fc9de3d03 652c2c2a80 8e7db0432e e1a15b25dc b1a3a55802 614bc5c1e1
3fe4d5477a cda24e345c 88037b2877 6cdd1aa350 ea8a3438f7 954158ce52
bf17383e35 83d81758b0 e3b878ce98 1e5f9334e0 3edbf416bf c2364b978d
158e3d60ec e4e579b40d 071528e103 a2fcd3d8f0 7d2e851d8e 85f9b778f5
369cde4ad7 3ffc52bcf5 8dcca2125a cdd14b1a31 37ed178611 c995c9bb91
aa619de748 6fde28c293 f4358fc647 57e19b1475 8051b6c1a1 566ff54d0d
f9359f59a8 e4561438f1 b8cd2bc94d f7ba1c34bb df87919165 733bf5d3dd
efde305c05 636dfc82b0 93abcc3a3b c3ec696284 fdd81b423b cd89fe5c4f
1636ed9826 8072d3a4e0 d215724ad6 0e6f0c4e02 629cc2fce4 8c52140059
f21bd80e90 4bdd4599f0 ed93dab9a8 62a81370ff e74c65c3db 248eadd341
e829d5b6d2 35d8ac94f3 94821a3353 d14c162fd6 14d1c5a2c3 329d154209
7bc96aec7b a6fdc5d208 681b40c801 536da93380 45d7dcfea2 210fa0871c
f768c6adb7 fde909ffb8 553b4dae45 929662a4d5 fbac812540 e481c82fa9
73a1ef7c22 c91c365f88 b8a4f570fb 70c7220a99 0f45e3c6e0 be0beb897a
8fa4c4b062 c06ab5f9c2 3ec39ad01a 1940015824 1acefac97e f630fbc7cf
e61f327ec9 c4444ce48f 7ba0f1f421 30454bb85c 2deb0c3365 efc0610c0e
391676b598 5204feeaa9 81d112cb7f 25be0b1e98 c56b045270 d9a1cc7e2b
30b4a0f76a 7d95145b76 379a7fab07 36e060299f a96a26c62f c3705e83e7
5e9b4a23e6 a1c5e276f4 eddda577a4 2ed1001c57 f02d766f9a 2035af44aa
746ae155fb a26801c73f 670b326c1b 15a6854119 3f9416b58d 7afd7a82bd
124da338fd 69a31c3386 20605eb310 945a1f06f9 64136071c6 fe956ad449
fa48f17493 6721525068 5cfcb05486 78c22c24b3 4faba4fae7 e1efd4cb8c
58daccab26 a8af12f80a 10cd902f90 818c249bae 2e6dbe87ad f379f667a2
8f382aaecd 821a82ac6c
.travis.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
# Note: right now we don't use go-specific features of travis.
# Later we might automate "go test" etc. (or do it inside a docker container...?)

language: go

go: 1.2

# Disable the normal go build.
install: true

before_script:
  - env | sort
  - sudo apt-get update -qq
  - sudo apt-get install -qq python-yaml
  - git remote add upstream git://github.com/dotcloud/docker.git
  - git fetch --append --no-tags upstream refs/heads/master:refs/remotes/upstream/master
  # sometimes we have upstream master already as origin/master (PRs), but other times we don't, so let's just make sure we have a completely unambiguous way to specify "upstream master" from here out

script:
  - hack/travis/dco.py
  - hack/travis/gofmt.py

# vim:set sw=2 ts=2:
AUTHORS (3 additions)
@@ -20,6 +20,7 @@ Antony Messerli <amesserl@rackspace.com>
Asbjørn Enge <asbjorn@hanafjedle.net>
Barry Allard <barry.allard@gmail.com>
Ben Toews <mastahyeti@gmail.com>
Ben Wiklund <ben@daisyowl.com>
Benoit Chesneau <bchesneau@gmail.com>
Bhiraj Butala <abhiraj.butala@gmail.com>
Bouke Haarsma <bouke@webatoom.nl>
@@ -68,6 +69,7 @@ Francisco Souza <f@souza.cc>
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
Gabriel Monroy <gabriel@opdemand.com>
Gareth Rushgrove <gareth@morethanseven.net>
Graydon Hoare <graydon@pobox.com>
Greg Thornton <xdissent@me.com>
Guillaume J. Charmes <guillaume.charmes@dotcloud.com>
Gurjeet Singh <gurjeet@singh.im>
@@ -113,6 +115,7 @@ Kyle Conroy <kyle.j.conroy@gmail.com>
Laurie Voss <github@seldo.com>
Louis Opter <kalessin@kalessin.fr>
Manuel Meurer <manuel@krautcomputing.com>
Manuel Woelker <docker@manuel.woelker.org>
Marco Hennings <marco.hennings@freiheit.com>
Marcus Farkas <toothlessgear@finitebox.com>
Marcus Ramberg <marcus@nordaaker.com>
CHANGELOG.md (119 changes)
@@ -1,5 +1,100 @@
# Changelog

## 0.7.4 (2014-01-07)

#### Builder

- Fix ADD caching issue with . prefixed path
- Fix docker build on devicemapper by reverting sparse file tar option
- Fix issue with file caching and prevent wrong cache hit
* Use same error handling while unmarshalling CMD and ENTRYPOINT

#### Documentation

* Simplify and streamline Amazon Quickstart
* Install instructions use unprefixed fedora image
* Update instructions for mtu flag for Docker on GCE
+ Add Ubuntu Saucy to installation
- Fix for wrong version warning on master instead of latest

#### Runtime

- Only get the image's rootfs when we need to calculate the image size
- Correctly handle unmapping UDP ports
* Make CopyFileWithTar use a pipe instead of a buffer to save memory on docker build
- Fix login message to say pull instead of push
- Fix "docker load" help by removing "SOURCE" prompt and mentioning STDIN
* Make blank -H option default to the same as no -H was sent
* Extract cgroups utilities to own submodule

#### Other

+ Add Travis CI configuration to validate DCO and gofmt requirements
+ Add Developer Certificate of Origin Text
* Upgrade VBox Guest Additions
* Check standalone header when pinging a registry server

## 0.7.3 (2014-01-02)

#### Builder

+ Update ADD to use the image cache, based on a hash of the added content
* Add error message for empty Dockerfile

#### Documentation

- Fix outdated link to the "Introduction" on www.docker.io
+ Update the docs to get wider when the screen does
- Add information about needing to install LXC when using raw binaries
* Update Fedora documentation to disentangle the docker and docker.io conflict
* Add a note about using the new `-mtu` flag in several GCE zones
+ Add FrugalWare installation instructions
+ Add a more complete example of `docker run`
- Fix API documentation for creating and starting Privileged containers
- Add missing "name" parameter documentation on "/containers/create"
* Add a mention of `lxc-checkconfig` as a way to check for some of the necessary kernel configuration
- Update the 1.8 API documentation with some additions that were added to the docs for 1.7

#### Hack

- Add missing libdevmapper dependency to the packagers documentation
* Update minimum Go requirement to a hard line at Go 1.2+
* Many minor improvements to the Vagrantfile
+ Add ability to customize dockerinit search locations when compiling (to be used very sparingly only by packagers of platforms who require a nonstandard location)
+ Add coverprofile generation reporting
- Add `-a` to our Go build flags, removing the need for recompiling the stdlib manually
* Update Dockerfile to be more canonical and have less spurious warnings during build
- Fix some miscellaneous `docker pull` progress bar display issues
* Migrate more miscellaneous packages under the "pkg" folder
* Update TextMate highlighting to automatically be enabled for files named "Dockerfile"
* Reorganize syntax highlighting files under a common "contrib/syntax" directory
* Update install.sh script (https://get.docker.io/) to not fail if busybox fails to download or run at the end of the Ubuntu/Debian installation
* Add support for container names in bash completion

#### Packaging

+ Add an official Docker client binary for Darwin (Mac OS X)
* Remove empty "Vendor" string and added "License" on deb package
+ Add a stubbed version of "/etc/default/docker" in the deb package

#### Runtime

* Update layer application to extract tars in place, avoiding file churn while handling whiteouts
- Fix permissiveness of mtime comparisons in tar handling (since GNU tar and Go tar do not yet support sub-second mtime precision)
* Reimplement `docker top` in pure Go to work more consistently, and even inside Docker-in-Docker (thus removing the shell injection vulnerability present in some versions of `lxc-ps`)
+ Update `-H unix://` to work similarly to `-H tcp://` by inserting the default values for missing portions
- Fix more edge cases regarding dockerinit and deleted or replaced docker or dockerinit files
* Update container name validation to include '.'
- Fix use of a symlink or non-absolute path as the argument to `-g` to work as expected
* Update to handle external mounts outside of LXC, fixing many small mounting quirks and making future execution backends and other features simpler
* Update to use proper box-drawing characters everywhere in `docker images -tree`
* Move MTU setting from LXC configuration to directly use netlink
* Add `-S` option to external tar invocation for more efficient spare file handling
+ Add arch/os info to User-Agent string, especially for registry requests
+ Add `-mtu` option to Docker daemon for configuring MTU
- Fix `docker build` to exit with a non-zero exit code on error
+ Add `DOCKER_HOST` environment variable to configure the client `-H` flag without specifying it manually for every invocation

## 0.7.2 (2013-12-16)

#### Runtime

@@ -15,7 +110,7 @@
- Prevent deletion of image if ANY container is depending on it even if the container is not running
* Update docker push to use new progress display
* Use os.Lstat to allow mounting unix sockets when inspecting volumes
- Adjusted handling of inactive user login
- Adjust handling of inactive user login
- Add missing defines in devicemapper for older kernels
- Allow untag operations with no container validation
- Add auth config to docker build
@@ -110,7 +205,7 @@

#### Runtime

* Improved stability, fixes some race conditons
* Improve stability, fixes some race conditons
* Skip the volumes mounted when deleting the volumes of container.
* Fix layer size computation: handle hard links correctly
* Use the work Path for docker cp CONTAINER:PATH
@@ -153,7 +248,7 @@
+ Add lock around write operations in graph
* Check if port is valid
* Fix restart runtime error with ghost container networking
+ Added some more colors and animals to increase the pool of generated names
+ Add some more colors and animals to increase the pool of generated names
* Fix issues in docker inspect
+ Escape apparmor confinement
+ Set environment variables using a file.
@@ -307,7 +402,7 @@
* Improve network performance for VirtualBox
* Revamp install.sh to be usable by more people, and to use official install methods whenever possible (apt repo, portage tree, etc.)
- Fix contrib/mkimage-debian.sh apt caching prevention
+ Added Dockerfile.tmLanguage to contrib
+ Add Dockerfile.tmLanguage to contrib
* Configured FPM to make /etc/init/docker.conf a config file
* Enable SSH Agent forwarding in Vagrant VM
* Several small tweaks/fixes for contrib/mkimage-debian.sh
@@ -421,7 +516,7 @@
* Mount /dev/shm as a tmpfs
- Switch from http to https for get.docker.io
* Let userland proxy handle container-bound traffic
* Updated the Docker CLI to specify a value for the "Host" header.
* Update the Docker CLI to specify a value for the "Host" header.
- Change network range to avoid conflict with EC2 DNS
- Reduce connect and read timeout when pinging the registry
* Parallel pull
@@ -617,7 +712,7 @@

+ Builder: 'docker build git://URL' fetches and builds a remote git repository
* Runtime: 'docker ps -s' optionally prints container size
* Tests: Improved and simplified
* Tests: improved and simplified
- Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail.
- Builder: fix a regression when using ADD with single regular file.

@@ -632,7 +727,7 @@
+ ADD of a local file will detect tar archives and unpack them
* ADD improvements: use tar for copy + automatically unpack local archives
* ADD uses tar/untar for copies instead of calling 'cp -ar'
* Fixed the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented.
* Fix the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented.
- Fix a bug which caused builds to fail if ADD was the first command
* Nicer output for 'docker build'

@@ -677,7 +772,7 @@
+ Detect faulty DNS configuration and replace it with a public default
+ Allow docker run <name>:<id>
+ You can now specify public port (ex: -p 80:4500)
* Improved image removal to garbage-collect unreferenced parents
* Improve image removal to garbage-collect unreferenced parents

#### Client

@@ -731,7 +826,7 @@

#### Documentation

* Improved install instructions.
* Improve install instructions.

## 0.3.3 (2013-05-23)

@@ -816,7 +911,7 @@

+ Support for data volumes ('docker run -v=PATH')
+ Share data volumes between containers ('docker run -volumes-from')
+ Improved documentation
+ Improve documentation
* Upgrade to Go 1.0.3
* Various upgrades to the dev environment for contributors

@@ -872,7 +967,7 @@
- Add debian packaging
- Documentation: installing on Arch Linux
- Documentation: running Redis on docker
- Fixed lxc 0.9 compatibility
- Fix lxc 0.9 compatibility
- Automatically load aufs module
- Various bugfixes and stability improvements

@@ -907,7 +1002,7 @@
- Stabilize process management
- Layers can include a commit message
- Simplified 'docker attach'
- Fixed support for re-attaching
- Fix support for re-attaching
- Various bugfixes and stability improvements
- Auto-download at run
- Auto-login on push
CONTRIBUTING.md
@@ -105,17 +105,39 @@ name and email address match your git configuration. The AUTHORS file is
regenerated occasionally from the git commit history, so a mismatch may result
in your changes being overwritten.

### Approval
### Sign your work

Docker maintainers use LGTM (looks good to me) in comments on the code review
to indicate acceptance.
The sign-off is a simple line at the end of the explanation for the
patch, which certifies that you wrote it or otherwise have the right to
pass it on as an open-source patch. The rules are pretty simple: if you
can certify the below:

```
Docker Developer Grant and Certificate of Origin 1.0

By making a contribution to the Docker Project ("Project"), I represent and warrant that:

a. The contribution was created in whole or in part by me and I have the right to submit the contribution on my own behalf or on behalf of a third party who has authorized me to submit this contribution to the Project; or

b. The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right and authorization to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license) that I have identified in the contribution; or

c. The contribution was provided directly to me by some other person who represented and warranted (a) or (b) and I have not modified it.

d. I understand and agree that this Project and the contribution are publicly known and that a record of the contribution (including all personal information I submit with it, including my sign-off record) is maintained indefinitely and may be redistributed consistent with this Project or the open source license(s) involved.

e. I hereby grant to the Project, Docker, Inc and its successors; and recipients of software distributed by the Project a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, modify, prepare derivative works of, publicly display, publicly perform, sublicense, and distribute this contribution and such modifications and derivative works consistent with this Project, the open source license indicated in the previous work or other appropriate open source license specified by the Project and approved by the Open Source Initiative(OSI) at http://www.opensource.org.
```

then you just add a line saying

    Docker-DCO-1.0-Signed-off-by: Joe Smith <joe.smith@email.com> (github: github_handle)

using your real name (sorry, no pseudonyms or anonymous contributions.)

If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.io)

A change requires LGTMs from an absolute majority of the maintainers of each
component affected. For example, if a change affects docs/ and registry/, it
needs an absolute majority from the maintainers of docs/ AND, separately, an
absolute majority of the maintainers of registry/.

For more details see [MAINTAINERS.md](hack/MAINTAINERS.md)

### How can I become a maintainer?
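The Travis job added above runs hack/travis/dco.py to enforce this sign-off. As a rough sketch of what such a check does (the real validator is the Python script; the exact pattern below is an assumption, not its source), the last line of each commit message can be matched against the required format:

```go
package main

import (
	"fmt"
	"regexp"
)

// Hypothetical re-implementation of the DCO sign-off check; the real
// validator is hack/travis/dco.py, so treat this regex as illustrative.
var signOff = regexp.MustCompile(
	`(?m)^Docker-DCO-1\.0-Signed-off-by: .+ <.+@.+> \(github: \S+\)$`)

func main() {
	msg := "archive: stream tars through a pipe\n\n" +
		"Docker-DCO-1.0-Signed-off-by: Joe Smith <joe.smith@email.com> (github: joesmith)"
	fmt.Println("signed off:", signOff.MatchString(msg)) // signed off: true
}
```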
Dockerfile (69 changes)
@@ -24,40 +24,32 @@
#

docker-version 0.6.1
FROM ubuntu:12.04
MAINTAINER Solomon Hykes <solomon@dotcloud.com>
FROM stackbrew/ubuntu:12.04
MAINTAINER Tianon Gravi <admwiggin@gmail.com> (@tianon)

# Build dependencies
RUN echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
RUN apt-get update
RUN apt-get install -y -q curl
RUN apt-get install -y -q git
RUN apt-get install -y -q mercurial
RUN apt-get install -y -q build-essential libsqlite3-dev
# Add precise-backports to get s3cmd >= 1.1.0 (so we get ENV variable support in our .s3cfg)
RUN echo 'deb http://archive.ubuntu.com/ubuntu precise-backports main universe' > /etc/apt/sources.list.d/backports.list

# Install Go
RUN curl -s https://go.googlecode.com/files/go1.2.src.tar.gz | tar -v -C /usr/local -xz
ENV PATH /usr/local/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
ENV GOPATH /go:/go/src/github.com/dotcloud/docker/vendor
RUN cd /usr/local/go/src && ./make.bash && go install -ldflags '-w -linkmode external -extldflags "-static -Wl,--unresolved-symbols=ignore-in-shared-libs"' -tags netgo -a std

# Ubuntu stuff
RUN apt-get install -y -q ruby1.9.3 rubygems libffi-dev
RUN gem install --no-rdoc --no-ri fpm
RUN apt-get install -y -q reprepro dpkg-sig

RUN apt-get install -y -q python-pip
RUN pip install s3cmd==1.1.0-beta3
RUN pip install python-magic==0.4.6
RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY\n' > /.s3cfg

# Runtime dependencies
RUN apt-get install -y -q iptables
RUN apt-get install -y -q lxc
RUN apt-get install -y -q aufs-tools
# Packaged dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \
	apt-utils \
	aufs-tools \
	build-essential \
	curl \
	dpkg-sig \
	git \
	iptables \
	libsqlite3-dev \
	lxc \
	mercurial \
	reprepro \
	ruby1.9.1 \
	ruby1.9.1-dev \
	s3cmd=1.1.0* \
	--no-install-recommends

# Get lvm2 source for compiling statically
RUN git clone https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout v2_02_103
RUN git clone https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103
# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
# note: we can't use "git clone -b" above because it requires at least git 1.7.10 to be able to use that on a tag instead of a branch and we only have 1.7.9.5

@@ -65,9 +57,26 @@ RUN git clone https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /u
RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper
# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL

# Install Go
RUN curl -s https://go.googlecode.com/files/go1.2.src.tar.gz | tar -v -C /usr/local -xz
ENV PATH /usr/local/go/bin:$PATH
ENV GOPATH /go:/go/src/github.com/dotcloud/docker/vendor
RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1

# Compile Go for cross compilation
ENV DOCKER_CROSSPLATFORMS darwin/amd64 darwin/386
# TODO add linux/386 and linux/arm
RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'

# Grab Go's cover tool for dead-simple code coverage testing
RUN go get code.google.com/p/go.tools/cmd/cover

# TODO replace FPM with some very minimal debhelper stuff
RUN gem install --no-rdoc --no-ri fpm --version 1.0.1

# Setup s3cmd config
RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg

VOLUME /var/lib/docker
WORKDIR /go/src/github.com/dotcloud/docker
MAINTAINERS
@@ -2,5 +2,8 @@ Solomon Hykes <solomon@dotcloud.com> (@shykes)
Guillaume Charmes <guillaume@dotcloud.com> (@creack)
Victor Vieux <victor@dotcloud.com> (@vieux)
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
.travis.yml: Tianon Gravi <admwiggin@gmail.com> (@tianon)
api.go: Victor Vieux <victor@dotcloud.com> (@vieux)
Dockerfile: Tianon Gravi <admwiggin@gmail.com> (@tianon)
Makefile: Tianon Gravi <admwiggin@gmail.com> (@tianon)
Vagrantfile: Daniel Mizyrycki <daniel@dotcloud.com> (@mzdaniel)
Makefile (5 changes)
@@ -1,4 +1,4 @@
.PHONY: all binary build default docs shell test
.PHONY: all binary build cross default docs shell test

DOCKER_RUN_DOCKER := docker run -rm -i -t -privileged -e TESTFLAGS -v $(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles docker

@@ -10,6 +10,9 @@ all: build
binary: build
	$(DOCKER_RUN_DOCKER) hack/make.sh binary

cross: build
	$(DOCKER_RUN_DOCKER) hack/make.sh binary cross

docs:
	docker build -t docker-docs docs && docker run -p 8000:8000 docker-docs
REMOTE_TODO.md (new file, 46 lines)
@@ -0,0 +1,46 @@
```
**GET**
send objects deprecate multi-stream
TODO "/events": getEvents, N
ok "/info": getInfo, 1
ok "/version": getVersion, 1
... "/images/json": getImagesJSON, N
TODO "/images/viz": getImagesViz, 0 yes
TODO "/images/search": getImagesSearch, N
#3490 "/images/{name:.*}/get": getImagesGet, 0
TODO "/images/{name:.*}/history": getImagesHistory, 1
TODO "/images/{name:.*}/json": getImagesByName, 1
TODO "/containers/ps": getContainersJSON, N
TODO "/containers/json": getContainersJSON, 1
ok "/containers/{name:.*}/export": getContainersExport, 0
TODO "/containers/{name:.*}/changes": getContainersChanges, 1
TODO "/containers/{name:.*}/json": getContainersByName, 1
TODO "/containers/{name:.*}/top": getContainersTop, N
TODO "/containers/{name:.*}/attach/ws": wsContainersAttach, 0 yes

**POST**
TODO "/auth": postAuth, 0 yes
ok "/commit": postCommit, 0
TODO "/build": postBuild, 0 yes
TODO "/images/create": postImagesCreate, N yes yes (pull)
TODO "/images/{name:.*}/insert": postImagesInsert, N yes yes
TODO "/images/load": postImagesLoad, 1 yes (stdin)
TODO "/images/{name:.*}/push": postImagesPush, N yes
ok "/images/{name:.*}/tag": postImagesTag, 0
ok "/containers/create": postContainersCreate, 0
ok "/containers/{name:.*}/kill": postContainersKill, 0
#3476 "/containers/{name:.*}/restart": postContainersRestart, 0
ok "/containers/{name:.*}/start": postContainersStart, 0
ok "/containers/{name:.*}/stop": postContainersStop, 0
ok "/containers/{name:.*}/wait": postContainersWait, 0
ok "/containers/{name:.*}/resize": postContainersResize, 0
TODO "/containers/{name:.*}/attach": postContainersAttach, 0 yes
TODO "/containers/{name:.*}/copy": postContainersCopy, 0 yes

**DELETE**
#3180 "/containers/{name:.*}": deleteContainers, 0
TODO "/images/{name:.*}": deleteImages, N

**OPTIONS**
ok "": optionsHandler, 0
```
Vagrantfile (vendored, 12 changes)
@@ -26,7 +26,7 @@ fi
# Adding an apt gpg key is idempotent.
wget -q -O - https://get.docker.io/gpg | apt-key add -

# Creating the docker.list file is idempotent, but it may overrite desired
# Creating the docker.list file is idempotent, but it may overwrite desired
# settings if it already exists. This could be solved with md5sum but it
# doesn't seem worth it.
echo 'deb http://get.docker.io/ubuntu docker main' > \
@@ -41,7 +41,7 @@ apt-get install -q -y lxc-docker
usermod -a -G docker "$user"

tmp=`mktemp -q` && {
# Only install the backport kernel, don't bother upgrade if the backport is
# Only install the backport kernel, don't bother upgrading if the backport is
# already installed. We want parse the output of apt so we need to save it
# with 'tee'. NOTE: The installation of the kernel will trigger dkms to
# install vboxguest if needed.
@@ -70,7 +70,7 @@ SCRIPT
# trigger dkms to build the virtualbox guest module install.
$vbox_script = <<VBOX_SCRIPT + $script
# Install the VirtualBox guest additions if they aren't already installed.
if [ ! -d /opt/VBoxGuestAdditions-4.3.4/ ]; then
if [ ! -d /opt/VBoxGuestAdditions-4.3.6/ ]; then
# Update remote package metadata. 'apt-get update' is idempotent.
apt-get update -q

@@ -79,10 +79,10 @@ if [ ! -d /opt/VBoxGuestAdditions-4.3.4/ ]; then
apt-get install -q -y linux-headers-generic-lts-raring dkms

echo 'Downloading VBox Guest Additions...'
wget -cq http://dlc.sun.com.edgesuite.net/virtualbox/4.3.4/VBoxGuestAdditions_4.3.4.iso
echo "f120793fa35050a8280eacf9c930cf8d9b88795161520f6515c0cc5edda2fe8a VBoxGuestAdditions_4.3.4.iso" | sha256sum --check || exit 1
wget -cq http://dlc.sun.com.edgesuite.net/virtualbox/4.3.6/VBoxGuestAdditions_4.3.6.iso
echo "95648fcdb5d028e64145a2fe2f2f28c946d219da366389295a61fed296ca79f0 VBoxGuestAdditions_4.3.6.iso" | sha256sum --check || exit 1

mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.3.4.iso /mnt
mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.3.6.iso /mnt
/mnt/VBoxLinuxAdditions.run --nox11
umount /mnt
fi
api.go (2 changes)
@@ -10,7 +10,7 @@ import (
	"fmt"
	"github.com/dotcloud/docker/archive"
	"github.com/dotcloud/docker/auth"
	"github.com/dotcloud/docker/systemd"
	"github.com/dotcloud/docker/pkg/systemd"
	"github.com/dotcloud/docker/utils"
	"github.com/gorilla/mux"
	"io"
archive/archive.go
@@ -3,6 +3,8 @@ package archive
import (
	"archive/tar"
	"bytes"
	"compress/bzip2"
	"compress/gzip"
	"fmt"
	"github.com/dotcloud/docker/utils"
	"io"
@@ -59,6 +61,43 @@ func DetectCompression(source []byte) Compression {
	return Uncompressed
}

func xzDecompress(archive io.Reader) (io.Reader, error) {
	args := []string{"xz", "-d", "-c", "-q"}

	return CmdStream(exec.Command(args[0], args[1:]...), archive, nil)
}

func DecompressStream(archive io.Reader) (io.Reader, error) {
	buf := make([]byte, 10)
	totalN := 0
	for totalN < 10 {
		n, err := archive.Read(buf[totalN:])
		if err != nil {
			if err == io.EOF {
				return nil, fmt.Errorf("Tarball too short")
			}
			return nil, err
		}
		totalN += n
		utils.Debugf("[tar autodetect] n: %d", n)
	}
	compression := DetectCompression(buf)
	wrap := io.MultiReader(bytes.NewReader(buf), archive)

	switch compression {
	case Uncompressed:
		return wrap, nil
	case Gzip:
		return gzip.NewReader(wrap)
	case Bzip2:
		return bzip2.NewReader(wrap), nil
	case Xz:
		return xzDecompress(wrap)
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}
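For reference, the header bytes behind DetectCompression are the standard magic numbers: gzip streams begin with 0x1f 0x8b, bzip2 with "BZh", and xz with 0xfd "7zXZ" 0x00. A standalone sketch of the same sniff-then-restitch idea (names here are illustrative, not this package's API, and unlike the code above it tolerates inputs shorter than 10 bytes):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// sniff reads up to 10 header bytes, picks a format by magic number, and
// re-stitches the consumed bytes in front of the remainder with
// io.MultiReader, exactly as DecompressStream does above.
func sniff(r io.Reader) (string, io.Reader, error) {
	buf := make([]byte, 10)
	n, err := io.ReadFull(r, buf)
	if err != nil && err != io.ErrUnexpectedEOF {
		return "", nil, err
	}
	head := buf[:n]
	rest := io.MultiReader(bytes.NewReader(head), r)
	switch {
	case bytes.HasPrefix(head, []byte{0x1f, 0x8b}):
		return "gzip", rest, nil
	case bytes.HasPrefix(head, []byte("BZh")):
		return "bzip2", rest, nil
	case bytes.HasPrefix(head, []byte{0xfd, '7', 'z', 'X', 'Z', 0x00}):
		return "xz", rest, nil
	}
	return "uncompressed", rest, nil
}

func main() {
	var b bytes.Buffer
	zw := gzip.NewWriter(&b)
	zw.Write([]byte("hello"))
	zw.Close()

	kind, rest, _ := sniff(&b)
	fmt.Println(kind) // gzip
	zr, _ := gzip.NewReader(rest) // the full stream, including the sniffed bytes
	out, _ := io.ReadAll(zr)
	fmt.Println(string(out)) // hello
}
```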
func (compression *Compression) Flag() string {
	switch *compression {
	case Bzip2:
@@ -155,7 +194,7 @@ func TarFilter(path string, options *TarOptions) (io.Reader, error) {
		}
	}

	return CmdStream(exec.Command(args[0], args[1:]...), &files, func() {
	return CmdStream(exec.Command(args[0], args[1:]...), bytes.NewBufferString(files), func() {
		if tmpDir != "" {
			_ = os.RemoveAll(tmpDir)
		}
@@ -260,7 +299,7 @@ func CopyWithTar(src, dst string) error {
//
// If `dst` ends with a trailing slash '/', the final destination path
// will be `dst/base(src)`.
func CopyFileWithTar(src, dst string) error {
func CopyFileWithTar(src, dst string) (err error) {
	utils.Debugf("CopyFileWithTar(%s, %s)", src, dst)
	srcSt, err := os.Stat(src)
	if err != nil {
@@ -277,31 +316,44 @@ func CopyFileWithTar(src, dst string) error {
	if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
		return err
	}
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	hdr, err := tar.FileInfoHeader(srcSt, "")
	if err != nil {
		return err
	}
	hdr.Name = filepath.Base(dst)
	if err := tw.WriteHeader(hdr); err != nil {
		return err
	}
	srcF, err := os.Open(src)
	if err != nil {
		return err
	}
	if _, err := io.Copy(tw, srcF); err != nil {
		return err
	}
	tw.Close()
	return Untar(buf, filepath.Dir(dst), nil)

	r, w := io.Pipe()
	errC := utils.Go(func() error {
		defer w.Close()

		srcF, err := os.Open(src)
		if err != nil {
			return err
		}
		defer srcF.Close()

		tw := tar.NewWriter(w)
		hdr, err := tar.FileInfoHeader(srcSt, "")
		if err != nil {
			return err
		}
		hdr.Name = filepath.Base(dst)
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if _, err := io.Copy(tw, srcF); err != nil {
			return err
		}
		tw.Close()
		return nil
	})
	defer func() {
		if er := <-errC; err != nil {
			err = er
		}
	}()
	return Untar(r, filepath.Dir(dst), nil)
}
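The rewritten CopyFileWithTar swaps a bytes.Buffer for io.Pipe, so the tar stream flows into Untar as it is produced instead of being held in memory; this is the "use a pipe instead of a buffer to save memory" item in the 0.7.4 changelog. utils.Go here appears to be a helper that runs a function in a goroutine and hands back a channel carrying its error. A stripped-down sketch of that producer/consumer shape (runAsync is a hypothetical stand-in for utils.Go):

```go
package main

import (
	"fmt"
	"io"
)

// runAsync stands in for utils.Go: run f in a goroutine and deliver
// its error on a buffered channel.
func runAsync(f func() error) chan error {
	ch := make(chan error, 1)
	go func() { ch <- f() }()
	return ch
}

func main() {
	r, w := io.Pipe()

	errC := runAsync(func() error {
		defer w.Close()
		// Producer: in the real code, a tar.Writer writes the file here.
		_, err := w.Write([]byte("streamed, not buffered"))
		return err
	})

	// Consumer: in the real code, Untar reads from r concurrently, so the
	// whole archive never sits in memory at once.
	out, err := io.ReadAll(r)
	if err == nil {
		err = <-errC // surface the producer's error, like the deferred check above
	}
	fmt.Println(string(out), err)
}
```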
// CmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
func CmdStream(cmd *exec.Cmd, input *string, atEnd func()) (io.Reader, error) {
func CmdStream(cmd *exec.Cmd, input io.Reader, atEnd func()) (io.Reader, error) {
	if input != nil {
		stdin, err := cmd.StdinPipe()
		if err != nil {
@@ -312,7 +364,7 @@ func CmdStream(cmd *exec.Cmd, input *string, atEnd func()) (io.Reader, error) {
		}
		// Write stdin if any
		go func() {
			_, _ = stdin.Write([]byte(*input))
			io.Copy(stdin, input)
			stdin.Close()
		}()
	}
archive/changes.go
@@ -6,6 +6,7 @@ import (
	"path/filepath"
	"strings"
	"syscall"
	"time"
)

type ChangeType int
@@ -34,6 +35,21 @@ func (change *Change) String() string {
	return fmt.Sprintf("%s %s", kind, change.Path)
}

// Gnu tar and the go tar writer don't have sub-second mtime
// precision, which is problematic when we apply changes via tar
// files, we handle this by comparing for exact times, *or* same
// second count and either a or b having exactly 0 nanoseconds
func sameFsTime(a, b time.Time) bool {
	return a == b ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func sameFsTimeSpec(a, b syscall.Timespec) bool {
	return a.Sec == b.Sec &&
		(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
}
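Concretely: GNU tar and Go's tar writer store whole-second mtimes, so a file written at 1389000000.123456789 comes back from a tar round-trip as 1389000000.0, and a naive equality check would flag every file as modified. A quick illustration of the comparison (self-contained copy of the function above):

```go
package main

import (
	"fmt"
	"time"
)

// Same comparison as archive/changes.go: exact match, or equal seconds
// when either side has lost its nanoseconds (e.g. via a tar round-trip).
func sameFsTime(a, b time.Time) bool {
	return a == b ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func main() {
	orig := time.Unix(1389000000, 123456789) // on-disk mtime
	fromTar := time.Unix(1389000000, 0)      // same instant after a tar round-trip
	fmt.Println(sameFsTime(orig, fromTar))   // true: treated as unchanged

	later := time.Unix(1389000001, 0)
	fmt.Println(sameFsTime(orig, later)) // false: a real modification
}
```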
func Changes(layers []string, rw string) ([]Change, error) {
	var changes []Change
	err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
@@ -85,7 +101,7 @@ func Changes(layers []string, rw string) ([]Change, error) {
		// However, if it's a directory, maybe it wasn't actually modified.
		// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
		if stat.IsDir() && f.IsDir() {
			if f.Size() == stat.Size() && f.Mode() == stat.Mode() && f.ModTime() == stat.ModTime() {
			if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
				// Both directories are the same, don't record the change
				return nil
			}
@@ -181,7 +197,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
			oldStat.Rdev != newStat.Rdev ||
			// Don't look at size for dirs, its not a good measure of change
			(oldStat.Size != newStat.Size && oldStat.Mode&syscall.S_IFDIR != syscall.S_IFDIR) ||
			getLastModification(oldStat) != getLastModification(newStat) {
			!sameFsTimeSpec(getLastModification(oldStat), getLastModification(newStat)) {
			change := Change{
				Path: newChild.path(),
				Kind: ChangeModify,
archive/changes_test.go
@@ -258,48 +258,44 @@ func TestChangesDirsMutated(t *testing.T) {
}

func TestApplyLayer(t *testing.T) {
	t.Skip("Skipping TestApplyLayer due to known failures") // Disable this for now as it is broken
	return
	src, err := ioutil.TempDir("", "docker-changes-test")
	if err != nil {
		t.Fatal(err)
	}
	createSampleDir(t, src)
	defer os.RemoveAll(src)
	dst := src + "-copy"
	if err := copyDir(src, dst); err != nil {
		t.Fatal(err)
	}
	mutateSampleDir(t, dst)
	defer os.RemoveAll(dst)

	// src, err := ioutil.TempDir("", "docker-changes-test")
	// if err != nil {
	// 	t.Fatal(err)
	// }
	// createSampleDir(t, src)
	// dst := src + "-copy"
	// if err := copyDir(src, dst); err != nil {
	// 	t.Fatal(err)
	// }
	// mutateSampleDir(t, dst)
	changes, err := ChangesDirs(dst, src)
	if err != nil {
		t.Fatal(err)
	}

	// changes, err := ChangesDirs(dst, src)
	// if err != nil {
	// 	t.Fatal(err)
	// }
	layer, err := ExportChanges(dst, changes)
	if err != nil {
		t.Fatal(err)
	}

	// layer, err := ExportChanges(dst, changes)
	// if err != nil {
	// 	t.Fatal(err)
	// }
	layerCopy, err := NewTempArchive(layer, "")
	if err != nil {
		t.Fatal(err)
	}

	// layerCopy, err := NewTempArchive(layer, "")
	// if err != nil {
	// 	t.Fatal(err)
	// }
	if err := ApplyLayer(src, layerCopy); err != nil {
		t.Fatal(err)
	}

	// if err := ApplyLayer(src, layerCopy); err != nil {
	// 	t.Fatal(err)
	// }
	changes2, err := ChangesDirs(src, dst)
	if err != nil {
		t.Fatal(err)
	}

	// changes2, err := ChangesDirs(src, dst)
	// if err != nil {
	// 	t.Fatal(err)
	// }

	// if len(changes2) != 0 {
	// 	t.Fatalf("Unexpected differences after re applying mutation: %v", changes)
	// }

	// os.RemoveAll(src)
	// os.RemoveAll(dst)
	if len(changes2) != 0 {
		t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2)
	}
}
archive/diff.go (233 changes)
@@ -1,6 +1,9 @@
package archive

import (
	"archive/tar"
	"github.com/dotcloud/docker/utils"
	"io"
	"os"
	"path/filepath"
	"strings"
@@ -8,87 +11,181 @@ import (
	"time"
)

// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
// then the top 12 bits of the minor
func mkdev(major int64, minor int64) uint32 {
	return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}

func timeToTimespec(time time.Time) (ts syscall.Timespec) {
	if time.IsZero() {
		// Return UTIME_OMIT special value
		ts.Sec = 0
		ts.Nsec = ((1 << 30) - 2)
		return
	}
	return syscall.NsecToTimespec(time.UnixNano())
}
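A worked example of that device-number encoding (the 8:1 major/minor pair is conventionally /dev/sda1 on Linux): minor 1 has no high bits, so the result is just (8 << 8) | 1 = 0x801, while a minor with bits above 0xff lands in the top 12 bits. The ts.Nsec value ((1 << 30) - 2) is Linux's UTIME_OMIT sentinel, which tells utimensat to leave that timestamp untouched.

```go
package main

import "fmt"

// Same encoding as archive/diff.go: low 8 bits of the minor, then 12 bits
// of the major, then the top 12 bits of the minor.
func mkdev(major, minor int64) uint32 {
	return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}

func main() {
	// 8:1 is conventionally /dev/sda1 on Linux.
	fmt.Printf("%#x\n", mkdev(8, 1)) // 0x801

	// A large minor demonstrates the split: its high bits (0x12300) move
	// above the 12 major bits, its low byte (0x45) stays at the bottom.
	fmt.Printf("%#x\n", mkdev(8, 0x12345)) // 0x12300845
}
```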
// ApplyLayer parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`.
func ApplyLayer(dest string, layer Archive) error {
	// Poor man's diff applyer in 2 steps:
	// We need to be able to set any perms
	oldmask := syscall.Umask(0)
	defer syscall.Umask(oldmask)

	// Step 1: untar everything in place
	if err := Untar(layer, dest, nil); err != nil {
	layer, err := DecompressStream(layer)
	if err != nil {
		return err
	}

	modifiedDirs := make(map[string]*syscall.Stat_t)
	addDir := func(file string) {
		d := filepath.Dir(file)
		if _, exists := modifiedDirs[d]; !exists {
			if s, err := os.Lstat(d); err == nil {
				if sys := s.Sys(); sys != nil {
					if stat, ok := sys.(*syscall.Stat_t); ok {
						modifiedDirs[d] = stat
	tr := tar.NewReader(layer)

	var dirs []*tar.Header

	// Iterate through the files in the archive.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// Normalize name, for safety and for a simple is-root check
		hdr.Name = filepath.Clean(hdr.Name)

		if !strings.HasSuffix(hdr.Name, "/") {
			// Not the root directory, ensure that the parent directory exists.
			// This happened in some tests where an image had a tarfile without any
			// parent directories.
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = os.MkdirAll(parentPath, 600)
				if err != nil {
					return err
				}
			}
		}

		// Skip AUFS metadata dirs
		if strings.HasPrefix(hdr.Name, ".wh..wh.") {
			continue
		}

		path := filepath.Join(dest, hdr.Name)
		base := filepath.Base(path)
		if strings.HasPrefix(base, ".wh.") {
			originalBase := base[len(".wh."):]
			originalPath := filepath.Join(filepath.Dir(path), originalBase)
			if err := os.RemoveAll(originalPath); err != nil {
				return err
			}
		} else {
			// If path exits we almost always just want to remove and replace it.
			// The only exception is when it is a directory *and* the file from
			// the layer is also a directory. Then we want to merge them (i.e.
			// just apply the metadata from the layer).
			hasDir := false
			if fi, err := os.Lstat(path); err == nil {
				if fi.IsDir() && hdr.Typeflag == tar.TypeDir {
					hasDir = true
				} else {
					if err := os.RemoveAll(path); err != nil {
						return err
					}
				}
			}

			switch hdr.Typeflag {
			case tar.TypeDir:
				if !hasDir {
					err = os.Mkdir(path, os.FileMode(hdr.Mode))
					if err != nil {
						return err
					}
				}
				dirs = append(dirs, hdr)

			case tar.TypeReg, tar.TypeRegA:
				// Source is regular file
				file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, os.FileMode(hdr.Mode))
				if err != nil {
					return err
				}
				if _, err := io.Copy(file, tr); err != nil {
					file.Close()
					return err
				}
				file.Close()

			case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
				mode := uint32(hdr.Mode & 07777)
				switch hdr.Typeflag {
				case tar.TypeBlock:
					mode |= syscall.S_IFBLK
				case tar.TypeChar:
					mode |= syscall.S_IFCHR
				case tar.TypeFifo:
					mode |= syscall.S_IFIFO
				}

				if err := syscall.Mknod(path, mode, int(mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
					return err
				}

			case tar.TypeLink:
				if err := os.Link(filepath.Join(dest, hdr.Linkname), path); err != nil {
					return err
				}

			case tar.TypeSymlink:
				if err := os.Symlink(hdr.Linkname, path); err != nil {
					return err
				}

			default:
				utils.Debugf("unhandled type %d\n", hdr.Typeflag)
			}

			if err = syscall.Lchown(path, hdr.Uid, hdr.Gid); err != nil {
				return err
			}

			// There is no LChmod, so ignore mode for symlink. Also, this
			// must happen after chown, as that can modify the file mode
			if hdr.Typeflag != tar.TypeSymlink {
				err = syscall.Chmod(path, uint32(hdr.Mode&07777))
				if err != nil {
					return err
				}
			}

			// Directories must be handled at the end to avoid further
			// file creation in them to modify the mtime
			if hdr.Typeflag != tar.TypeDir {
				ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
				// syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and
				if hdr.Typeflag != tar.TypeSymlink {
					if err := syscall.UtimesNano(path, ts); err != nil {
						return err
					}
				} else {
					if err := LUtimesNano(path, ts); err != nil {
						return err
					}
				}
			}
		}
	}

	// Step 2: walk for whiteouts and apply them, removing them in the process
	err := filepath.Walk(dest, func(fullPath string, f os.FileInfo, err error) error {
		if err != nil {
			if os.IsNotExist(err) {
				// This happens in the case of whiteouts in parent dir removing a directory
				// We just ignore it
				return filepath.SkipDir
			}
			return err
		}

		// Rebase path
		path, err := filepath.Rel(dest, fullPath)
		if err != nil {
			return err
		}
		path = filepath.Join("/", path)

		// Skip AUFS metadata
		if matched, err := filepath.Match("/.wh..wh.*", path); err != nil {
			return err
		} else if matched {
			addDir(fullPath)
			if err := os.RemoveAll(fullPath); err != nil {
				return err
			}
		}

		filename := filepath.Base(path)
		if strings.HasPrefix(filename, ".wh.") {
			rmTargetName := filename[len(".wh."):]
			rmTargetPath := filepath.Join(filepath.Dir(fullPath), rmTargetName)

			// Remove the file targeted by the whiteout
			addDir(rmTargetPath)
			if err := os.RemoveAll(rmTargetPath); err != nil {
				return err
			}
			// Remove the whiteout itself
			addDir(fullPath)
			if err := os.RemoveAll(fullPath); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		return err
	}

	for k, v := range modifiedDirs {
		lastAccess := getLastAccess(v)
		lastModification := getLastModification(v)
		aTime := time.Unix(lastAccess.Unix())
		mTime := time.Unix(lastModification.Unix())

		if err := os.Chtimes(k, aTime, mTime); err != nil {
	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
		if err := syscall.UtimesNano(path, ts); err != nil {
			return err
		}
	}
archive stat helpers (Darwin variant)
@@ -9,3 +9,7 @@ func getLastAccess(stat *syscall.Stat_t) syscall.Timespec {
func getLastModification(stat *syscall.Stat_t) syscall.Timespec {
	return stat.Mtimespec
}

func LUtimesNano(path string, ts []syscall.Timespec) error {
	return nil
}
archive stat helpers (Linux variant)
@@ -1,6 +1,9 @@
package archive

import "syscall"
import (
	"syscall"
	"unsafe"
)

func getLastAccess(stat *syscall.Stat_t) syscall.Timespec {
	return stat.Atim
@@ -9,3 +12,21 @@ func getLastAccess(stat *syscall.Stat_t) syscall.Timespec {
func getLastModification(stat *syscall.Stat_t) syscall.Timespec {
	return stat.Mtim
}

func LUtimesNano(path string, ts []syscall.Timespec) error {
	// These are not currently available in syscall
	AT_FDCWD := -100
	AT_SYMLINK_NOFOLLOW := 0x100

	var _path *byte
	_path, err := syscall.BytePtrFromString(path)
	if err != nil {
		return err
	}

	if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(AT_FDCWD), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(AT_SYMLINK_NOFOLLOW), 0, 0); err != 0 && err != syscall.ENOSYS {
		return err
	}

	return nil
}
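A minimal Linux-only usage sketch of that pattern: create a symlink and set the link's own timestamps without following it. AT_FDCWD (-100) and AT_SYMLINK_NOFOLLOW (0x100) are the standard Linux values used in the patch above; the ENOSYS tolerance is dropped here for brevity.

```go
// Illustrative, Linux-only usage of the LUtimesNano pattern shown above.
package main

import (
	"fmt"
	"os"
	"syscall"
	"unsafe"
)

func lutimesNano(path string, ts []syscall.Timespec) error {
	// Declared as variables (not consts) so the negative value converts
	// to uintptr, exactly as in the patch above.
	atFdcwd := -100
	atSymlinkNofollow := 0x100

	p, err := syscall.BytePtrFromString(path)
	if err != nil {
		return err
	}
	if _, _, errno := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdcwd),
		uintptr(unsafe.Pointer(p)), uintptr(unsafe.Pointer(&ts[0])),
		uintptr(atSymlinkNofollow), 0, 0); errno != 0 {
		return errno
	}
	return nil
}

func main() {
	os.Symlink("/nonexistent-target", "lnk") // a dangling target is fine here
	defer os.Remove("lnk")
	// Both timespecs zero => set the link's own atime/mtime to the epoch.
	ts := []syscall.Timespec{syscall.NsecToTimespec(0), syscall.NsecToTimespec(0)}
	fmt.Println(lutimesNano("lnk", ts))
}
```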
auth/auth.go (10 changes)
@@ -163,7 +163,7 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e

	loginAgainstOfficialIndex := serverAddress == IndexServerAddress()

	// to avoid sending the server address to the server it should be removed before marshalled
	// to avoid sending the server address to the server it should be removed before being marshalled
	authCopy := *authConfig
	authCopy.ServerAddress = ""

@@ -254,11 +254,11 @@ func (config *ConfigFile) ResolveAuthConfig(registry string) AuthConfig {
		// default to the index server
		return config.Configs[IndexServerAddress()]
	}
	// if its not the index server there are three cases:
	// if it's not the index server there are three cases:
	//
	// 1. this is a full config url -> it should be used as is
	// 2. it could be a full url, but with the wrong protocol
	// 3. it can be the hostname optionally with a port
	// 1. a full config url -> it should be used as is
	// 2. a full url, but with the wrong protocol
	// 3. a hostname, with an optional port
	//
	// as there is only one auth entry which is fully qualified we need to start
	// parsing and matching
buildfile.go (278 changes)
@@ -1,7 +1,10 @@
package docker

import (
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"github.com/dotcloud/docker/archive"
	"github.com/dotcloud/docker/auth"
@@ -11,11 +14,17 @@ import (
	"net/url"
	"os"
	"path"
	"path/filepath"
	"reflect"
	"regexp"
	"sort"
	"strings"
)

var (
	ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
)

type BuildFile interface {
	Build(io.Reader) (string, error)
	CmdFrom(string) error
@@ -26,10 +35,13 @@ type buildFile struct {
	runtime *Runtime
	srv     *Server

	image      string
	maintainer string
	config     *Config
	context    string
	image      string
	maintainer string
	config     *Config

	contextPath string
	context     *utils.TarSum

	verbose      bool
	utilizeCache bool
	rm           bool
@@ -87,6 +99,27 @@ func (b *buildFile) CmdMaintainer(name string) error {
	return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", name))
}

// probeCache checks to see if image-caching is enabled (`b.utilizeCache`)
// and if so attempts to look up the current `b.image` and `b.config` pair
// in the current server `b.srv`. If an image is found, probeCache returns
// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
// is any error, it returns `(false, err)`.
func (b *buildFile) probeCache() (bool, error) {
	if b.utilizeCache {
		if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
			return false, err
		} else if cache != nil {
			fmt.Fprintf(b.outStream, " ---> Using cache\n")
			utils.Debugf("[BUILDER] Use cached version")
			b.image = cache.ID
			return true, nil
		} else {
			utils.Debugf("[BUILDER] Cache miss")
		}
	}
	return false, nil
}
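The new crypto/sha256 and encoding/hex imports back the 0.7.3 changelog entry "Update ADD to use the image cache, based on a hash of the added content": a cached layer for ADD can only be reused safely if the cache key reflects what was added, not just the command text. A minimal sketch of such a content-addressed key (illustrative only; in the real builder the keying involves CmdAdd and utils.TarSum, and addCacheKey is a hypothetical helper):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

// addCacheKey sketches a content-addressed cache key for "ADD <orig> <dest>":
// two identical ADD lines with different file contents must not collide.
func addCacheKey(orig, dest string, content []byte) string {
	sum := sha256.Sum256(content)
	return strings.Join([]string{
		"ADD", orig, dest, hex.EncodeToString(sum[:]),
	}, " ")
}

func main() {
	fmt.Println(addCacheKey("app.conf", "/etc/app.conf", []byte("debug=false")))
	// Same ADD line, changed content => different key, so no stale cache hit.
	fmt.Println(addCacheKey("app.conf", "/etc/app.conf", []byte("debug=true")))
}
```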
func (b *buildFile) CmdRun(args string) error {
	if b.image == "" {
		return fmt.Errorf("Please provide a source image with `from` prior to run")
@@ -104,17 +137,12 @@ func (b *buildFile) CmdRun(args string) error {

	utils.Debugf("Command to be executed: %v", b.config.Cmd)

	if b.utilizeCache {
		if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
			return err
		} else if cache != nil {
			fmt.Fprintf(b.outStream, " ---> Using cache\n")
			utils.Debugf("[BUILDER] Use cached version")
			b.image = cache.ID
			return nil
		} else {
			utils.Debugf("[BUILDER] Cache miss")
		}
	hit, err := b.probeCache()
	if err != nil {
		return err
	}
	if hit {
		return nil
	}

	cid, err := b.run()
@@ -185,16 +213,30 @@ func (b *buildFile) CmdEnv(args string) error {
	return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", replacedVar))
}

func (b *buildFile) CmdCmd(args string) error {
func (b *buildFile) buildCmdFromJson(args string) []string {
	var cmd []string
	if err := json.Unmarshal([]byte(args), &cmd); err != nil {
		utils.Debugf("Error unmarshalling: %s, setting cmd to /bin/sh -c", err)
		utils.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err)
		cmd = []string{"/bin/sh", "-c", args}
	}
	if err := b.commit("", cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
	return cmd
}

func (b *buildFile) CmdCmd(args string) error {
	cmd := b.buildCmdFromJson(args)
	b.config.Cmd = cmd
	if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
		return err
	}
	return nil
}

func (b *buildFile) CmdEntrypoint(args string) error {
	entrypoint := b.buildCmdFromJson(args)
	b.config.Entrypoint = entrypoint
	if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil {
		return err
	}
	b.config.Cmd = cmd
	return nil
}

@@ -217,23 +259,6 @@ func (b *buildFile) CmdCopy(args string) error {
	return fmt.Errorf("COPY has been deprecated. Please use ADD instead")
}

func (b *buildFile) CmdEntrypoint(args string) error {
	if args == "" {
		return fmt.Errorf("Entrypoint cannot be empty")
	}

	var entrypoint []string
	if err := json.Unmarshal([]byte(args), &entrypoint); err != nil {
		b.config.Entrypoint = []string{"/bin/sh", "-c", args}
	} else {
		b.config.Entrypoint = entrypoint
	}
	if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %s", args)); err != nil {
		return err
	}
	return nil
}

func (b *buildFile) CmdWorkdir(workdir string) error {
	b.config.WorkingDir = workdir
	return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
@@ -260,44 +285,27 @@ func (b *buildFile) CmdVolume(args string) error {
	return nil
}

func (b *buildFile) addRemote(container *Container, orig, dest string) error {
	file, err := utils.Download(orig)
func (b *buildFile) checkPathForAddition(orig string) error {
	origPath := path.Join(b.contextPath, orig)
	if !strings.HasPrefix(origPath, b.contextPath) {
		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
	}
	_, err := os.Stat(origPath)
	if err != nil {
		return err
		return fmt.Errorf("%s: no such file or directory", orig)
	}
	defer file.Body.Close()

	// If the destination is a directory, figure out the filename.
	if strings.HasSuffix(dest, "/") {
		u, err := url.Parse(orig)
		if err != nil {
			return err
		}
		path := u.Path
		if strings.HasSuffix(path, "/") {
			path = path[:len(path)-1]
		}
		parts := strings.Split(path, "/")
		filename := parts[len(parts)-1]
		if filename == "" {
			return fmt.Errorf("cannot determine filename from url: %s", u)
		}
		dest = dest + filename
	}

	return container.Inject(file.Body, dest)
	return nil
}

func (b *buildFile) addContext(container *Container, orig, dest string) error {
	origPath := path.Join(b.context, orig)
	destPath := path.Join(container.RootfsPath(), dest)
	var (
		origPath = path.Join(b.contextPath, orig)
		destPath = path.Join(container.RootfsPath(), dest)
	)
	// Preserve the trailing '/'
	if strings.HasSuffix(dest, "/") {
		destPath = destPath + "/"
	}
	if !strings.HasPrefix(origPath, b.context) {
		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
	}
	fi, err := os.Stat(origPath)
	if err != nil {
		return fmt.Errorf("%s: no such file or directory", orig)
@@ -321,7 +329,7 @@ func (b *buildFile) addContext(container *Container, orig, dest string) error {
}
func (b *buildFile) CmdAdd(args string) error {
|
||||
if b.context == "" {
|
||||
if b.context == nil {
|
||||
return fmt.Errorf("No context given. Impossible to use ADD")
|
||||
}
|
||||
tmp := strings.SplitN(args, " ", 2)
|
||||
@@ -341,8 +349,100 @@ func (b *buildFile) CmdAdd(args string) error {
|
||||
|
||||
cmd := b.config.Cmd
|
||||
b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", orig, dest)}
|
||||
|
||||
b.config.Image = b.image
|
||||
|
||||
// FIXME: do we really need this?
|
||||
var (
|
||||
origPath = orig
|
||||
destPath = dest
|
||||
)
|
||||
|
||||
if utils.IsURL(orig) {
|
||||
resp, err := utils.Download(orig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tmpFileName := path.Join(tmpDirName, "tmp")
|
||||
tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(tmpDirName)
|
||||
if _, err = io.Copy(tmpFile, resp.Body); err != nil {
|
||||
return err
|
||||
}
|
||||
origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
|
||||
tmpFile.Close()
|
||||
|
||||
// If the destination is a directory, figure out the filename.
|
||||
if strings.HasSuffix(dest, "/") {
|
||||
u, err := url.Parse(orig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
path := u.Path
|
||||
if strings.HasSuffix(path, "/") {
|
||||
path = path[:len(path)-1]
|
||||
}
|
||||
parts := strings.Split(path, "/")
|
||||
filename := parts[len(parts)-1]
|
||||
if filename == "" {
|
||||
return fmt.Errorf("cannot determine filename from url: %s", u)
|
||||
}
|
||||
destPath = dest + filename
|
||||
}
|
||||
}
|
||||
|
||||
if err := b.checkPathForAddition(origPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Hash path and check the cache
|
||||
if b.utilizeCache {
|
||||
var (
|
||||
hash string
|
||||
sums = b.context.GetSums()
|
||||
)
|
||||
|
||||
// The tarsum strips the '.' and './' prefixes; we put them back for comparison.
for file, sum := range sums {
if len(file) == 0 || file[0] != '.' && file[0] != '/' {
delete(sums, file)
sums["./"+file] = sum
}
}

if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil {
return err
} else if fi.IsDir() {
var subfiles []string
for file, sum := range sums {
if strings.HasPrefix(file, origPath) {
subfiles = append(subfiles, sum)
}
}
sort.Strings(subfiles)
hasher := sha256.New()
hasher.Write([]byte(strings.Join(subfiles, ",")))
hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
} else {
hash = "file:" + sums[origPath]
}
b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", hash, dest)}
hit, err := b.probeCache()
if err != nil {
return err
}
// If we do not have a hash, never use the cache
if hit && hash != "" {
return nil
}
}

// Create the container and start it
container, _, err := b.runtime.Create(b.config, "")
if err != nil {
@@ -355,14 +455,8 @@ func (b *buildFile) CmdAdd(args string) error {
}
defer container.Unmount()

if utils.IsURL(orig) {
if err := b.addRemote(container, orig, dest); err != nil {
return err
}
} else {
if err := b.addContext(container, orig, dest); err != nil {
return err
}
if err := b.addContext(container, origPath, destPath); err != nil {
return err
}

if err := b.commit(container.ID, cmd, fmt.Sprintf("ADD %s in %s", orig, dest)); err != nil {
@@ -460,17 +554,12 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
defer func(cmd []string) { b.config.Cmd = cmd }(cmd)

if b.utilizeCache {
if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
return err
} else if cache != nil {
fmt.Fprintf(b.outStream, " ---> Using cache\n")
utils.Debugf("[BUILDER] Use cached version")
b.image = cache.ID
return nil
} else {
utils.Debugf("[BUILDER] Cache miss")
}
hit, err := b.probeCache()
if err != nil {
return err
}
if hit {
return nil
}

container, warnings, err := b.runtime.Create(b.config, "")
@@ -511,17 +600,17 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
var lineContinuation = regexp.MustCompile(`\s*\\\s*\n`)

func (b *buildFile) Build(context io.Reader) (string, error) {
// FIXME: @creack "name" is a terrible variable name
name, err := ioutil.TempDir("", "docker-build")
tmpdirPath, err := ioutil.TempDir("", "docker-build")
if err != nil {
return "", err
}
if err := archive.Untar(context, name, nil); err != nil {
b.context = &utils.TarSum{Reader: context}
if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
return "", err
}
defer os.RemoveAll(name)
b.context = name
filename := path.Join(name, "Dockerfile")
defer os.RemoveAll(tmpdirPath)
b.contextPath = tmpdirPath
filename := path.Join(tmpdirPath, "Dockerfile")
if _, err := os.Stat(filename); os.IsNotExist(err) {
return "", fmt.Errorf("Can't build a directory with no Dockerfile")
}
@@ -529,6 +618,9 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
if err != nil {
return "", err
}
if len(fileBytes) == 0 {
return "", ErrDockerfileEmpty
}
dockerfile := string(fileBytes)
dockerfile = lineContinuation.ReplaceAllString(dockerfile, "")
stepN := 0
@@ -568,7 +660,7 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
}
return b.image, nil
}
return "", fmt.Errorf("An error occurred during the build\n")
return "", fmt.Errorf("No image was generated. This may be because the Dockerfile does not, like, do anything.\n")
}

func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *auth.AuthConfig) BuildFile {
cgroups/cgroups.go (new file, 101 lines)
@@ -0,0 +1,101 @@
package cgroups

import (
	"bufio"
	"fmt"
	"github.com/dotcloud/docker/mount"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

// https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt

func FindCgroupMountpoint(subsystem string) (string, error) {
	mounts, err := mount.GetMounts()
	if err != nil {
		return "", err
	}

	for _, mount := range mounts {
		if mount.Fstype == "cgroup" {
			for _, opt := range strings.Split(mount.VfsOpts, ",") {
				if opt == subsystem {
					return mount.Mountpoint, nil
				}
			}
		}
	}

	return "", fmt.Errorf("cgroup mountpoint not found for %s", subsystem)
}

// Returns the relative path to the cgroup docker is running in.
func getThisCgroupDir(subsystem string) (string, error) {
	f, err := os.Open("/proc/self/cgroup")
	if err != nil {
		return "", err
	}
	defer f.Close()

	return parseCgroupFile(subsystem, f)
}

func parseCgroupFile(subsystem string, r io.Reader) (string, error) {
	s := bufio.NewScanner(r)

	for s.Scan() {
		if err := s.Err(); err != nil {
			return "", err
		}
		text := s.Text()
		parts := strings.Split(text, ":")
		if parts[1] == subsystem {
			return parts[2], nil
		}
	}
	return "", fmt.Errorf("cgroup '%s' not found in /proc/self/cgroup", subsystem)
}

// Returns a list of pids for the given container.
func GetPidsForContainer(id string) ([]int, error) {
	pids := []int{}

	// memory is chosen randomly, any cgroup used by docker works
	subsystem := "memory"

	cgroupRoot, err := FindCgroupMountpoint(subsystem)
	if err != nil {
		return pids, err
	}

	cgroupDir, err := getThisCgroupDir(subsystem)
	if err != nil {
		return pids, err
	}

	filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks")
	if _, err := os.Stat(filename); os.IsNotExist(err) {
		// With more recent lxc versions, the cgroup will be in lxc/
		filename = filepath.Join(cgroupRoot, cgroupDir, "lxc", id, "tasks")
	}

	output, err := ioutil.ReadFile(filename)
	if err != nil {
		return pids, err
	}
	for _, p := range strings.Split(string(output), "\n") {
		if len(p) == 0 {
			continue
		}
		pid, err := strconv.Atoi(p)
		if err != nil {
			return pids, fmt.Errorf("Invalid pid '%s': %s", p, err)
		}
		pids = append(pids, pid)
	}
	return pids, nil
}
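
To make the new package's contract concrete, here is a minimal usage sketch. It assumes the package is imported as ``github.com/dotcloud/docker/cgroups`` (matching the import paths used elsewhere in this changeset) and uses a made-up container ID; it is an illustration, not code from these commits.

.. code-block:: go

    package main

    import (
        "fmt"
        "log"

        "github.com/dotcloud/docker/cgroups"
    )

    func main() {
        // "4c01db0b339c" is a hypothetical container ID; any container whose
        // cgroup exists under the memory hierarchy would do.
        pids, err := cgroups.GetPidsForContainer("4c01db0b339c")
        if err != nil {
            log.Fatal(err)
        }
        for _, pid := range pids {
            fmt.Println(pid) // one process ID per line
        }
    }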
cgroups/cgroups_test.go (new file, 27 lines)
@@ -0,0 +1,27 @@
package cgroups

import (
	"bytes"
	"testing"
)

const (
	cgroupsContents = `11:hugetlb:/
10:perf_event:/
9:blkio:/
8:net_cls:/
7:freezer:/
6:devices:/
5:memory:/
4:cpuacct,cpu:/
3:cpuset:/
2:name=systemd:/user.slice/user-1000.slice/session-16.scope`
)

func TestParseCgroups(t *testing.T) {
	r := bytes.NewBuffer([]byte(cgroupsContents))
	_, err := parseCgroupFile("blkio", r)
	if err != nil {
		t.Fatal(err)
	}
}
commands.go (23 changes)
@@ -12,8 +12,8 @@ import (
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/auth"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/pkg/term"
"github.com/dotcloud/docker/registry"
"github.com/dotcloud/docker/term"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
@@ -238,6 +238,10 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
}
err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), body, cli.out, headers)
if jerr, ok := err.(*utils.JSONError); ok {
// If no error code is set, default to 1
if jerr.Code == 0 {
jerr.Code = 1
}
return &utils.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
}
return err
@@ -469,6 +473,13 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
fmt.Fprintf(cli.out, "LXC Version: %s\n", remoteInfo.Get("LXCVersion"))
fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener"))
fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion"))

if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" {
fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1)
}
if initPath := remoteInfo.Get("InitPath"); initPath != "" {
fmt.Fprintf(cli.out, "Init Path: %s\n", initPath)
}
}

if len(remoteInfo.GetList("IndexServerAddress")) != 0 {
@@ -1089,7 +1100,7 @@ func (cli *DockerCli) CmdPull(args ...string) error {

if err := pull(authConfig); err != nil {
if err.Error() == registry.ErrLoginRequired.Error() {
fmt.Fprintln(cli.out, "\nPlease login prior to push:")
fmt.Fprintln(cli.out, "\nPlease login prior to pull:")
if err := cli.CmdLogin(endpoint); err != nil {
return err
}
@@ -1237,9 +1248,9 @@ func (cli *DockerCli) WalkTree(noTrunc bool, images *[]APIImages, byParent map[s
cli.WalkTree(noTrunc, &subimages, byParent, prefix+" ", printNode)
}
} else {
printNode(cli, noTrunc, image, prefix+"|─")
printNode(cli, noTrunc, image, prefix+"├─")
if subimages, exists := byParent[image.ID]; exists {
cli.WalkTree(noTrunc, &subimages, byParent, prefix+"| ", printNode)
cli.WalkTree(noTrunc, &subimages, byParent, prefix+"│ ", printNode)
}
}
}
@@ -1814,6 +1825,8 @@ func parseRun(cmd *flag.FlagSet, args []string, capabilities *Capabilities) (*Co
flVolumes.Set(dstDir)
binds = append(binds, bind)
flVolumes.Delete(bind)
} else if bind == "/" {
return nil, nil, cmd, fmt.Errorf("Invalid volume: path can't be '/'")
}
}

@@ -2194,7 +2207,7 @@ func (cli *DockerCli) CmdSave(args ...string) error {
}

func (cli *DockerCli) CmdLoad(args ...string) error {
cmd := cli.Subcmd("load", "SOURCE", "Load an image from a tar archive")
cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN")
if err := cmd.Parse(args); err != nil {
return err
}

@@ -128,7 +128,9 @@ func TestParseRunVolumes(t *testing.T) {
t.Fatalf("Error parsing volume flags, without volume, no volume should be present. Received %v", config.Volumes)
}

mustParse(t, "-v /")
if _, _, err := parse(t, "-v /"); err == nil {
t.Fatalf("Expected error, but got none")
}

if _, _, err := parse(t, "-v /:/"); err == nil {
t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't")
@@ -18,6 +18,7 @@ type DaemonConfig struct {
DefaultIp net.IP
InterContainerCommunication bool
GraphDriver string
Mtu int
}

// ConfigFromJob creates and returns a new DaemonConfig object
@@ -41,5 +42,10 @@ func ConfigFromJob(job *engine.Job) *DaemonConfig {
config.DefaultIp = net.ParseIP(job.Getenv("DefaultIp"))
config.InterContainerCommunication = job.GetenvBool("InterContainerCommunication")
config.GraphDriver = job.Getenv("GraphDriver")
if mtu := job.GetenvInt("Mtu"); mtu != -1 {
config.Mtu = mtu
} else {
config.Mtu = DefaultNetworkMtu
}
return &config
}
container.go (94 changes)
@@ -7,7 +7,8 @@ import (
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/graphdriver"
"github.com/dotcloud/docker/term"
"github.com/dotcloud/docker/mount"
"github.com/dotcloud/docker/pkg/term"
"github.com/dotcloud/docker/utils"
"github.com/kr/pty"
"io"
@@ -48,7 +49,6 @@ type Container struct {
network *NetworkInterface
NetworkSettings *NetworkSettings

SysInitPath string
ResolvConfPath string
HostnamePath string
HostsPath string
@@ -297,7 +297,11 @@ func (container *Container) generateEnvConfig(env []string) error {
if err != nil {
return err
}
ioutil.WriteFile(container.EnvConfigPath(), data, 0600)
p, err := container.EnvConfigPath()
if err != nil {
return err
}
ioutil.WriteFile(p, data, 0600)
return nil
}

@@ -578,6 +582,7 @@ func (container *Container) Start() (err error) {
params = append(params,
"-g", network.Gateway,
"-i", fmt.Sprintf("%s/%d", network.IPAddress, network.IPPrefixLen),
"-mtu", strconv.Itoa(container.runtime.config.Mtu),
)
}

@@ -681,6 +686,45 @@ func (container *Container) Start() (err error) {
}
}

root := container.RootfsPath()
envPath, err := container.EnvConfigPath()
if err != nil {
return err
}

// Mount docker specific files into the containers root fs
if err := mount.Mount(runtime.sysInitPath, path.Join(root, "/.dockerinit"), "none", "bind,ro"); err != nil {
return err
}
if err := mount.Mount(envPath, path.Join(root, "/.dockerenv"), "none", "bind,ro"); err != nil {
return err
}
if err := mount.Mount(container.ResolvConfPath, path.Join(root, "/etc/resolv.conf"), "none", "bind,ro"); err != nil {
return err
}

if container.HostnamePath != "" && container.HostsPath != "" {
if err := mount.Mount(container.HostnamePath, path.Join(root, "/etc/hostname"), "none", "bind,ro"); err != nil {
return err
}
if err := mount.Mount(container.HostsPath, path.Join(root, "/etc/hosts"), "none", "bind,ro"); err != nil {
return err
}
}

// Mount user specified volumes

for r, v := range container.Volumes {
mountAs := "ro"
if container.VolumesRW[v] {
mountAs = "rw"
}

if err := mount.Mount(v, path.Join(root, r), "none", fmt.Sprintf("bind,%s", mountAs)); err != nil {
return err
}
}

container.cmd = exec.Command(params[0], params[1:]...)

// Setup logging of stdout and stderr to disk
@@ -836,7 +880,7 @@ func (container *Container) createVolumes() error {
volPath = path.Join(container.RootfsPath(), volPath)
rootVolPath, err := utils.FollowSymlinkInScope(volPath, container.RootfsPath())
if err != nil {
panic(err)
return err
}

if _, err := os.Stat(rootVolPath); err != nil {
@@ -1358,6 +1402,32 @@ func (container *Container) GetImage() (*Image, error) {
}

func (container *Container) Unmount() error {
var (
err error
root = container.RootfsPath()
mounts = []string{
path.Join(root, "/.dockerinit"),
path.Join(root, "/.dockerenv"),
path.Join(root, "/etc/resolv.conf"),
}
)

if container.HostnamePath != "" && container.HostsPath != "" {
mounts = append(mounts, path.Join(root, "/etc/hostname"), path.Join(root, "/etc/hosts"))
}

for r := range container.Volumes {
mounts = append(mounts, path.Join(root, r))
}

for _, m := range mounts {
if lastError := mount.Unmount(m); lastError != nil {
err = lastError
}
}
if err != nil {
return err
}
return container.runtime.Unmount(container)
}

@@ -1377,8 +1447,20 @@ func (container *Container) jsonPath() string {
return path.Join(container.root, "config.json")
}

func (container *Container) EnvConfigPath() string {
return path.Join(container.root, "config.env")
func (container *Container) EnvConfigPath() (string, error) {
p := path.Join(container.root, "config.env")
if _, err := os.Stat(p); err != nil {
if os.IsNotExist(err) {
f, err := os.Create(p)
if err != nil {
return "", err
}
f.Close()
} else {
return "", err
}
}
return p, nil
}

func (container *Container) lxcConfigPath() string {
@@ -4,7 +4,7 @@
#
# This script provides completion of:
# - commands and their options
# - container ids
# - container ids and names
# - image repos and tags
# - filepaths
#
@@ -25,21 +25,24 @@ __docker_containers_all()
{
local containers
containers="$( docker ps -a -q )"
COMPREPLY=( $( compgen -W "$containers" -- "$cur" ) )
names="$( docker inspect -format '{{.Name}}' $containers | sed 's,^/,,' )"
COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) )
}

__docker_containers_running()
{
local containers
containers="$( docker ps -q )"
COMPREPLY=( $( compgen -W "$containers" -- "$cur" ) )
names="$( docker inspect -format '{{.Name}}' $containers | sed 's,^/,,' )"
COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) )
}

__docker_containers_stopped()
{
local containers
containers="$( comm -13 <(docker ps -q | sort -u) <(docker ps -a -q | sort -u) )"
COMPREPLY=( $( compgen -W "$containers" -- "$cur" ) )
names="$( docker inspect -format '{{.Name}}' $containers | sed 's,^/,,' )"
COMPREPLY=( $( compgen -W "$names $containers" -- "$cur" ) )
}

__docker_image_repos()
@@ -70,8 +73,9 @@ __docker_containers_and_images()
{
local containers images
containers="$( docker ps -a -q )"
names="$( docker inspect -format '{{.Name}}' $containers | sed 's,^/,,' )"
images="$( docker images | awk 'NR>1{print $1":"$2}' )"
COMPREPLY=( $( compgen -W "$images $containers" -- "$cur" ) )
COMPREPLY=( $( compgen -W "$images $names $containers" -- "$cur" ) )
__ltrim_colon_completions "$cur"
}

@@ -144,9 +144,9 @@ if [ -z "$strictDebootstrap" ]; then
echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null
# we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context)
{
aptGetClean='rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true'
echo 'DPkg::Post-Invoke { "'$aptGetClean'"; };'
echo 'APT::Update::Post-Invoke { "'$aptGetClean'"; };'
aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";'
echo "DPkg::Post-Invoke { ${aptGetClean} };"
echo "APT::Update::Post-Invoke { ${aptGetClean} };"
echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";'
} | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null
# and remove the translations, too

@@ -4,6 +4,10 @@
<dict>
<key>name</key>
<string>Dockerfile</string>
<key>fileTypes</key>
<array>
<string>Dockerfile</string>
</array>
<key>patterns</key>
<array>
<dict>
@@ -40,6 +40,7 @@ func main() {
flInterContainerComm = flag.Bool("icc", true, "Enable inter-container communication")
flGraphDriver = flag.String("s", "", "Force the docker runtime to use a specific storage driver")
flHosts = docker.NewListOpts(docker.ValidateHost)
flMtu = flag.Int("mtu", docker.DefaultNetworkMtu, "Set the containers network mtu")
)
flag.Var(&flDns, "dns", "Force docker to use specific DNS servers")
flag.Var(&flHosts, "H", "Multiple tcp://host:port or unix://path/to/socket to bind in daemon mode, single connection otherwise")
@@ -51,8 +52,13 @@ func main() {
return
}
if flHosts.Len() == 0 {
// If we do not have a host, default to unix socket
flHosts.Set(fmt.Sprintf("unix://%s", docker.DEFAULTUNIXSOCKET))
defaultHost := os.Getenv("DOCKER_HOST")

if defaultHost == "" || *flDaemon {
// If we do not have a host, default to unix socket
defaultHost = fmt.Sprintf("unix://%s", docker.DEFAULTUNIXSOCKET)
}
flHosts.Set(defaultHost)
}

if *bridgeName != "" && *bridgeIp != "" {
@@ -69,6 +75,7 @@ func main() {
flag.Usage()
return
}

eng, err := engine.New(*flRoot)
if err != nil {
log.Fatal(err)
@@ -86,6 +93,7 @@ func main() {
job.Setenv("DefaultIp", *flDefaultIp)
job.SetenvBool("InterContainerCommunication", *flInterContainerComm)
job.Setenv("GraphDriver", *flGraphDriver)
job.SetenvInt("Mtu", *flMtu)
if err := job.Run(); err != nil {
log.Fatal(err)
}
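
The host-resolution order introduced above reads: an explicit ``-H`` flag wins, ``DOCKER_HOST`` is honored only when not running as a daemon, and everything else falls back to the unix socket. A standalone sketch of that logic (``resolveHost`` is a hypothetical helper written for illustration, not part of this commit):

.. code-block:: go

    package main

    import (
        "fmt"
        "os"
    )

    const defaultUnixSocket = "/var/run/docker.sock" // mirrors docker.DEFAULTUNIXSOCKET

    // resolveHost picks the daemon address the way the CLI now does:
    // flag value first, then DOCKER_HOST (client only), then the unix socket.
    func resolveHost(flagValue string, daemonMode bool) string {
        if flagValue != "" {
            return flagValue
        }
        if host := os.Getenv("DOCKER_HOST"); host != "" && !daemonMode {
            return host
        }
        return fmt.Sprintf("unix://%s", defaultUnixSocket)
    }

    func main() {
        fmt.Println(resolveHost("", false)) // prints unix:///var/run/docker.sock
    }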
@@ -46,7 +46,7 @@ directory:

* Linux: `pip install -r docs/requirements.txt`

* Mac OS X: `[sudo] pip-2.7 -r docs/requirements.txt`
* Mac OS X: `[sudo] pip-2.7 install -r docs/requirements.txt`

### Alternative Installation: Docker Container

@@ -26,10 +26,10 @@ Docker Remote API
2. Versions
===========

The current version of the API is 1.7
The current version of the API is 1.8

Calling /images/<name>/insert is the same as calling
/v1.7/images/<name>/insert
/v1.8/images/<name>/insert

You can still call an old version of the api using
/v1.0/images/<name>/insert

@@ -1078,7 +1078,7 @@ Monitor Docker's events

.. sourcecode:: http

POST /events?since=1374067924
GET /events?since=1374067924

**Example response**:
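
The hunk above corrects the verb: ``/events`` is consumed with a GET, and the daemon holds the connection open while it streams JSON events. A hedged client sketch, assuming a daemon listening on the then-default ``tcp://127.0.0.1:4243`` and treating each line of the stream as one event (a simplification):

.. code-block:: go

    package main

    import (
        "bufio"
        "fmt"
        "log"
        "net/http"
    )

    func main() {
        // Streams events that happened since the given epoch timestamp.
        resp, err := http.Get("http://127.0.0.1:4243/events?since=1374067924")
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()

        // Each event arrives as a JSON document on the open connection.
        scanner := bufio.NewScanner(resp.Body)
        for scanner.Scan() {
            fmt.Println(scanner.Text())
        }
    }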
@@ -1122,7 +1122,7 @@ Monitor Docker's events

.. sourcecode:: http

POST /events?since=1374067924
GET /events?since=1374067924

**Example response**:

@@ -1093,7 +1093,7 @@ Monitor Docker's events

.. sourcecode:: http

POST /events?since=1374067924
GET /events?since=1374067924

**Example response**:

@@ -1228,7 +1228,7 @@ Monitor Docker's events

.. sourcecode:: http

POST /events?since=1374067924
GET /events?since=1374067924

**Example response**:

@@ -122,7 +122,6 @@ Create a container
"AttachStdout":true,
"AttachStderr":true,
"PortSpecs":null,
"Privileged": false,
"Tty":false,
"OpenStdin":false,
"StdinOnce":false,
@@ -136,10 +135,12 @@ Create a container
"/tmp": {}
},
"VolumesFrom":"",
"WorkingDir":""

"WorkingDir":"",
"ExposedPorts":{
"22/tcp": {}
}
}

**Example response**:

.. sourcecode:: http
@@ -364,10 +365,11 @@ Start a container
{
"Binds":["/tmp:/tmp"],
"LxcConf":{"lxc.utsname":"docker"},
"PortBindings":null
"PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] },
"Privileged":false,
"PublishAllPorts":false
}

Binds need to reference Volumes that were defined during container creation.

**Example response**:
@@ -1159,7 +1161,7 @@ Monitor Docker's events

.. sourcecode:: http

POST /events?since=1374067924
GET /events?since=1374067924

**Example response**:

@@ -122,7 +122,6 @@ Create a container
"AttachStdout":true,
"AttachStderr":true,
"PortSpecs":null,
"Privileged": false,
"Tty":false,
"OpenStdin":false,
"StdinOnce":false,
@@ -132,12 +131,16 @@ Create a container
],
"Dns":null,
"Image":"base",
"Volumes":{},
"Volumes":{
"/tmp": {}
},
"VolumesFrom":"",
"WorkingDir":""

"WorkingDir":"",
"ExposedPorts":{
"22/tcp": {}
}
}

**Example response**:

.. sourcecode:: http
@@ -151,6 +154,7 @@ Create a container
}

:jsonparam config: the container's configuration
:query name: Assign the specified name to the container. Must match ``/?[a-zA-Z0-9_-]+``.
:statuscode 201: no error
:statuscode 404: no such container
:statuscode 406: impossible to attach (container not running)
@@ -377,7 +381,10 @@ Start a container

{
"Binds":["/tmp:/tmp"],
"LxcConf":{"lxc.utsname":"docker"}
"LxcConf":{"lxc.utsname":"docker"},
"PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] },
"PublishAllPorts":false,
"Privileged":false
}

**Example response**:
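
The hunks above split responsibilities between the two calls: volumes are declared in the create payload, while ``Binds`` and ``PortBindings`` arrive in the start payload and must refer back to those volumes. A sketch of that two-step call sequence (daemon address, image, and container ID are placeholders, and a real client would parse the ``Id`` out of the create response instead of hardcoding it):

.. code-block:: go

    package main

    import (
        "bytes"
        "fmt"
        "log"
        "net/http"
    )

    func main() {
        // Step 1: declare the volume when the container is created.
        create := []byte(`{"Image": "base", "Cmd": ["/bin/sh"], "Volumes": {"/tmp": {}}}`)
        if _, err := http.Post("http://127.0.0.1:4243/containers/create",
            "application/json", bytes.NewReader(create)); err != nil {
            log.Fatal(err)
        }

        // Step 2: bind it (and publish a port) when the container is started.
        start := []byte(`{"Binds": ["/tmp:/tmp"], "PortBindings": {"22/tcp": [{"HostPort": "11022"}]}}`)
        resp, err := http.Post("http://127.0.0.1:4243/containers/4c01db0b339c/start",
            "application/json", bytes.NewReader(start))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(resp.Status)
    }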
@@ -1173,7 +1180,7 @@ Monitor Docker's events

.. sourcecode:: http

POST /events?since=1374067924
GET /events?since=1374067924

**Example response**:

@@ -19,7 +19,8 @@ Docker Registry API
- It doesn’t have a local database
- It will be open-sourced at some point

We expect that there will be multiple registries out there. To help to grasp the context, here are some examples of registries:
We expect that there will be multiple registries out there. To help to grasp
the context, here are some examples of registries:

- **sponsor registry**: such a registry is provided by a third-party hosting infrastructure as a convenience for their customers and the docker community as a whole. Its costs are supported by the third party, but the management and operation of the registry are supported by dotCloud. It features read/write access, and delegates authentication and authorization to the Index.
- **mirror registry**: such a registry is provided by a third-party hosting infrastructure but is targeted at their customers only. Some mechanism (unspecified to date) ensures that public images are pulled from a sponsor registry to the mirror registry, to make sure that the customers of the third-party provider can “docker pull” those images locally.
@@ -37,7 +38,10 @@ We expect that there will be multiple registries out there. To help to grasp the
- local mount point;
- remote docker addressed through SSH.

The latter would only require two new commands in docker, e.g. “registryget” and “registryput”, wrapping access to the local filesystem (and optionally doing consistency checks). Authentication and authorization are then delegated to SSH (e.g. with public keys).
The latter would only require two new commands in docker, e.g. ``registryget``
and ``registryput``, wrapping access to the local filesystem (and optionally
doing consistency checks). Authentication and authorization are then delegated
to SSH (e.g. with public keys).

2. Endpoints
============

@@ -15,11 +15,13 @@ Registry & Index Spec
---------

The Index is responsible for centralizing information about:

- User accounts
- Checksums of the images
- Public namespaces

The Index has different components:

- Web UI
- Meta-data store (comments, stars, list public repositories)
- Authentication service
@@ -27,7 +29,7 @@ The Index has different components:

The index is authoritative for this information.

We expect that there will be only one instance of the index, run and managed by dotCloud.
We expect that there will be only one instance of the index, run and managed by Docker Inc.

1.2 Registry
------------
@@ -53,12 +55,16 @@ We expect that there will be multiple registries out there. To help to grasp the
- local mount point;
- remote docker addressed through SSH.

The latter would only require two new commands in docker, e.g. “registryget” and “registryput”, wrapping access to the local filesystem (and optionally doing consistency checks). Authentication and authorization are then delegated to SSH (e.g. with public keys).
The latter would only require two new commands in docker, e.g. ``registryget``
and ``registryput``, wrapping access to the local filesystem (and optionally
doing consistency checks). Authentication and authorization are then delegated
to SSH (e.g. with public keys).

1.3 Docker
----------

On top of being a runtime for LXC, Docker is the Registry client. It supports:

- Push / Pull on the registry
- Client authentication on the Index

@@ -72,21 +78,33 @@ On top of being a runtime for LXC, Docker is the Registry client. It supports:

1. Contact the Index to know where I should download “samalba/busybox”
2. Index replies:
a. “samalba/busybox” is on Registry A
b. here are the checksums for “samalba/busybox” (for all layers)
a. ``samalba/busybox`` is on Registry A
b. here are the checksums for ``samalba/busybox`` (for all layers)
c. token
3. Contact Registry A to receive the layers for “samalba/busybox” (all of them to the base image). Registry A is authoritative for “samalba/busybox” but keeps a copy of all inherited layers and serves them all from the same location.
3. Contact Registry A to receive the layers for ``samalba/busybox`` (all of them to the base image). Registry A is authoritative for “samalba/busybox” but keeps a copy of all inherited layers and serves them all from the same location.
4. registry contacts index to verify if token/user is allowed to download images
5. Index returns true/false letting the registry know if it should proceed or error out
6. Get the payload for all layers

It’s possible to run docker pull \https://<registry>/repositories/samalba/busybox. In this case, docker bypasses the Index. However the security is not guaranteed (in case Registry A is corrupted) because there won’t be any checksum checks.
It's possible to run:

Currently registry redirects to s3 urls for downloads, going forward all downloads need to be streamed through the registry. The Registry will then abstract the calls to S3 by a top-level class which implements sub-classes for S3 and local storage.
.. code-block:: bash

Token is only returned when the 'X-Docker-Token' header is sent with request.
docker pull https://<registry>/repositories/samalba/busybox

Basic Auth is required to pull private repos. Basic auth isn't required for pulling public repos, but if one is provided, it needs to be valid and for an active account.
In this case, Docker bypasses the Index. However the security is not guaranteed
(in case Registry A is corrupted) because there won’t be any checksum checks.

Currently registry redirects to s3 urls for downloads, going forward all
downloads need to be streamed through the registry. The Registry will then
abstract the calls to S3 by a top-level class which implements sub-classes for
S3 and local storage.

Token is only returned when the ``X-Docker-Token`` header is sent with request.

Basic Auth is required to pull private repos. Basic auth isn't required for
pulling public repos, but if one is provided, it needs to be valid and for an
active account.

API (pulling repository foo/bar):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -155,7 +173,9 @@ API (pulling repository foo/bar):

**Index can be replaced!** For a privately deployed Registry, a custom Index can be used to serve and validate tokens according to different policies.

Docker computes the checksums and submits them to the Index at the end of the push. When a repository name does not have checksums on the Index, it means that the push is in progress (since checksums are submitted at the end).
Docker computes the checksums and submits them to the Index at the end of the
push. When a repository name does not have checksums on the Index, it means
that the push is in progress (since checksums are submitted at the end).

API (pushing repos foo/bar):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -237,10 +257,11 @@ API (pushing repos foo/bar):
2.3 Delete
----------

If you need to delete something from the index or registry, we need a nice clean way to do that. Here is the workflow.
If you need to delete something from the index or registry, we need a nice
clean way to do that. Here is the workflow.

1. Docker contacts the index to request a delete of a repository “samalba/busybox” (authentication required with user credentials)
2. If authentication works and repository is valid, “samalba/busybox” is marked as deleted and a temporary token is returned
1. Docker contacts the index to request a delete of a repository ``samalba/busybox`` (authentication required with user credentials)
2. If authentication works and repository is valid, ``samalba/busybox`` is marked as deleted and a temporary token is returned
3. Send a delete request to the registry for the repository (along with the token)
4. Registry A contacts the Index to verify the token (the token must correspond to the repository name)
5. Index validates the token. Registry A deletes the repository and everything associated to it.
@@ -312,24 +333,40 @@ The Index has two main purposes (along with its fancy social features):

3.1 Without an Index
--------------------
Using the Registry without the Index can be useful to store the images on a private network without having to rely on an external entity controlled by dotCloud.

In this case, the registry will be launched in a special mode (--standalone? --no-index?). In this mode, the only thing which changes is that Registry will never contact the Index to verify a token. It will be the Registry owner's responsibility to authenticate the user who pushes (or even pulls) an image using any mechanism (HTTP auth, IP based, etc...).
Using the Registry without the Index can be useful to store the images on a
private network without having to rely on an external entity controlled by
Docker Inc.

In this scenario, the Registry is responsible for the security in case of data corruption since the checksums are not delivered by a trusted entity.
In this case, the registry will be launched in a special mode (--standalone?
--no-index?). In this mode, the only thing which changes is that Registry will
never contact the Index to verify a token. It will be the Registry owner's
responsibility to authenticate the user who pushes (or even pulls) an image
using any mechanism (HTTP auth, IP based, etc...).

As hinted previously, a standalone registry can also be implemented by any HTTP server handling GET/PUT requests (or even only GET requests if no write access is necessary).
In this scenario, the Registry is responsible for the security in case of data
corruption since the checksums are not delivered by a trusted entity.

As hinted previously, a standalone registry can also be implemented by any HTTP
server handling GET/PUT requests (or even only GET requests if no write access
is necessary).

3.2 With an Index
-----------------

The Index data needed by the Registry are simple:

- Serve the checksums
- Provide and authorize a Token

In the scenario of a Registry running on a private network with the need for centralized authorization, it’s easy to use a custom Index.
In the scenario of a Registry running on a private network with the need for
centralized authorization, it’s easy to use a custom Index.

The only challenge will be to tell Docker to contact (and trust) this custom Index. Docker will be configurable at some point to use a specific Index, it’ll be the private entity's responsibility (basically the organization who uses Docker in a private environment) to maintain the Index and the Docker configuration among its consumers.
The only challenge will be to tell Docker to contact (and trust) this custom
Index. Docker will be configurable at some point to use a specific Index, it’ll
be the private entity's responsibility (basically the organization who uses
Docker in a private environment) to maintain the Index and the Docker
configuration among its consumers.

4. The API
==========
@@ -339,16 +376,22 @@ The first version of the api is available here: https://github.com/jpetazzo/dock
4.1 Images
----------

The format returned in the images is not defined here (for layer and json), basically because Registry stores exactly the same kind of information as Docker uses to manage them.
The format returned in the images is not defined here (for layer and JSON),
basically because Registry stores exactly the same kind of information as
Docker uses to manage them.

The format of ancestry is a line-separated list of image ids, in age order. I.e. the image’s parent is on the last line, the parent of the parent on the next-to-last line, etc.; if the image has no parent, the file is empty.
The format of ancestry is a line-separated list of image ids, in age order,
i.e. the image’s parent is on the last line, the parent of the parent on the
next-to-last line, etc.; if the image has no parent, the file is empty.

GET /v1/images/<image_id>/layer
PUT /v1/images/<image_id>/layer
GET /v1/images/<image_id>/json
PUT /v1/images/<image_id>/json
GET /v1/images/<image_id>/ancestry
PUT /v1/images/<image_id>/ancestry
.. code-block:: bash

    GET /v1/images/<image_id>/layer
    PUT /v1/images/<image_id>/layer
    GET /v1/images/<image_id>/json
    PUT /v1/images/<image_id>/json
    GET /v1/images/<image_id>/ancestry
    PUT /v1/images/<image_id>/ancestry
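
Since an ancestry document is just one image id per line, a client can recover the chain with a few lines of Go. A sketch under that assumption (``parseAncestry`` is illustrative, not part of the spec, and the ids in ``main`` are placeholders):

.. code-block:: go

    package main

    import (
        "fmt"
        "strings"
    )

    // parseAncestry turns the body of GET /v1/images/<id>/ancestry into a
    // slice of ids, one per line, in the order they appear in the file
    // (the ordering semantics are described in the paragraph above).
    func parseAncestry(body string) []string {
        var ids []string
        for _, line := range strings.Split(body, "\n") {
            if line = strings.TrimSpace(line); line != "" {
                ids = append(ids, line)
            }
        }
        return ids
    }

    func main() {
        fmt.Println(parseAncestry("d6f500f815c8\n27cf78414709\n8dbd9e392a96\n"))
    }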

4.2 Users
---------
@@ -393,7 +436,9 @@ PUT /v1/users/<username>

4.2.3 Login (Index)
^^^^^^^^^^^^^^^^^^^
Does nothing else but asking for a user authentication. Can be used to validate credentials. HTTP Basic Auth for now, maybe change in future.

Does nothing else but asking for a user authentication. Can be used to validate
credentials. HTTP Basic Auth for now, maybe change in future.

GET /v1/users

@@ -405,7 +450,10 @@ GET /v1/users
4.3 Tags (Registry)
-------------------

The Registry does not know anything about users. Even though repositories are under usernames, it’s just a namespace for the registry. Allowing us to implement organizations or different namespaces per user later, without modifying the Registry’s API.
The Registry does not know anything about users. Even though repositories are
under usernames, it’s just a namespace for the registry. Allowing us to
implement organizations or different namespaces per user later, without
modifying the Registry’s API.

The following naming restrictions apply:

@@ -439,7 +487,10 @@ DELETE /v1/repositories/<namespace>/<repo_name>/tags/<tag>
4.4 Images (Index)
------------------

For the Index to “resolve” the repository name to a Registry location, it uses the X-Docker-Endpoints header. In other words, these requests always add an ``X-Docker-Endpoints`` header to indicate the location of the registry which hosts this repository.
For the Index to “resolve” the repository name to a Registry location, it uses
the X-Docker-Endpoints header. In other words, these requests always add an
``X-Docker-Endpoints`` header to indicate the location of the registry which
hosts this repository.

4.4.1 Get the images
^^^^^^^^^^^^^^^^^^^^^
@@ -484,17 +535,20 @@ Return 202 OK
======================

It’s possible to chain Registry servers for several reasons:

- Load balancing
- Delegate the next request to another server

When a Registry is a reference for a repository, it should host the entire images chain in order to avoid breaking the chain during the download.
When a Registry is a reference for a repository, it should host the entire
images chain in order to avoid breaking the chain during the download.

The Index and Registry use this mechanism to redirect on one or the other.

Example with an image download:
On every request, a special header can be returned:

X-Docker-Endpoints: server1,server2
On every request, a special header can be returned::

    X-Docker-Endpoints: server1,server2

On the next request, the client will always pick a server from this list.
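
A sketch of how a client might honor that header on its next request; this does header parsing only, leaves retry and failover policy out, and picks at random as a stand-in for a real balancing strategy:

.. code-block:: go

    package main

    import (
        "fmt"
        "math/rand"
        "net/http"
        "strings"
    )

    // pickEndpoint returns one registry host from a response carrying
    // "X-Docker-Endpoints: server1,server2".
    func pickEndpoint(resp *http.Response) (string, error) {
        h := resp.Header.Get("X-Docker-Endpoints")
        if h == "" {
            return "", fmt.Errorf("no X-Docker-Endpoints header in response")
        }
        servers := strings.Split(h, ",")
        return strings.TrimSpace(servers[rand.Intn(len(servers))]), nil
    }

    func main() {
        resp := &http.Response{Header: http.Header{"X-Docker-Endpoints": []string{"server1,server2"}}}
        server, _ := pickEndpoint(resp)
        fmt.Println(server) // e.g. "server1"
    }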
@@ -504,7 +558,8 @@ On the next request, the client will always pick a server from this list.
6.1 On the Index
-----------------

The Index supports both “Basic” and “Token” challenges. Usually when there is a “401 Unauthorized”, the Index replies this::
The Index supports both “Basic” and “Token” challenges. Usually when there is a
``401 Unauthorized``, the Index replies this::

    401 Unauthorized
    WWW-Authenticate: Basic realm="auth required",Token
@@ -543,11 +598,13 @@ The Registry only supports the Token challenge::
    401 Unauthorized
    WWW-Authenticate: Token

The only way is to provide a token on “401 Unauthorized” responses::
The only way is to provide a token on ``401 Unauthorized`` responses::

Authorization: Token signature=123abc,repository=”foo/bar”,access=read
    Authorization: Token signature=123abc,repository="foo/bar",access=read

Usually, the Registry provides a Cookie when a Token verification succeeded. Every time the Registry passes a Cookie, you have to pass it back the same cookie.::
Usually, the Registry provides a Cookie when a Token verification succeeded.
Every time the Registry passes a Cookie, you have to pass it back the same
cookie.::

    200 OK
    Set-Cookie: session="wD/J7LqL5ctqw8haL10vgfhrb2Q=?foo=UydiYXInCnAxCi4=&timestamp=RjEzNjYzMTQ5NDcuNDc0NjQzCi4="; Path=/; HttpOnly
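
Putting the token and cookie rules together: a client answers the Token challenge with the ``Authorization`` header and lets a cookie jar replay whatever session cookie the Registry set. A sketch with a placeholder registry URL and token:

.. code-block:: go

    package main

    import (
        "fmt"
        "log"
        "net/http"
        "net/http/cookiejar"
    )

    func main() {
        // The jar replays the Registry session cookie on subsequent requests.
        jar, err := cookiejar.New(nil)
        if err != nil {
            log.Fatal(err)
        }
        client := &http.Client{Jar: jar}

        req, err := http.NewRequest("GET", "https://registry.example.com/v1/repositories/foo/bar/tags", nil)
        if err != nil {
            log.Fatal(err)
        }
        // Token obtained earlier from the Index; answers the Token challenge.
        req.Header.Set("Authorization", `Token signature=123abc,repository="foo/bar",access=read`)

        resp, err := client.Do(req)
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status)
    }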
@@ -12,7 +12,7 @@ To list available commands, either run ``docker`` with no parameters or execute
|
||||
|
||||
$ sudo docker
|
||||
Usage: docker [OPTIONS] COMMAND [arg...]
|
||||
-H=[unix:///var/run/docker.sock]: tcp://host:port to bind/connect to or unix://path/to/socket to use
|
||||
-H=[unix:///var/run/docker.sock]: tcp://[host[:port]] to bind/connect to or unix://[/path/to/socket] to use. When host=[0.0.0.0], port=[4243] or path=[/var/run/docker.sock] is omitted, default values are used.
|
||||
|
||||
A self-sufficient runtime for linux containers.
|
||||
|
||||
@@ -27,7 +27,7 @@ To list available commands, either run ``docker`` with no parameters or execute
|
||||
|
||||
Usage of docker:
|
||||
-D=false: Enable debug mode
|
||||
-H=[unix:///var/run/docker.sock]: Multiple tcp://host:port or unix://path/to/socket to bind in daemon mode, single connection otherwise
|
||||
-H=[unix:///var/run/docker.sock]: tcp://[host[:port]] to bind or unix://[/path/to/socket] to use. When host=[0.0.0.0], port=[4243] or path=[/var/run/docker.sock] is omitted, default values are used.
|
||||
-api-enable-cors=false: Enable CORS headers in the remote API
|
||||
-b="": Attach containers to a pre-existing network bridge; use 'none' to disable container networking
|
||||
-bip="": Use the provided CIDR notation address for the dynamically created bridge (docker0); Mutually exclusive of -b
|
||||
@@ -37,19 +37,32 @@ To list available commands, either run ``docker`` with no parameters or execute
|
||||
-icc=true: Enable inter-container communication
|
||||
-ip="0.0.0.0": Default IP address to use when binding container ports
|
||||
-iptables=true: Disable docker's addition of iptables rules
|
||||
-mtu=1500: Set the containers network mtu
|
||||
-p="/var/run/docker.pid": Path to use for daemon PID file
|
||||
-r=true: Restart previously running containers
|
||||
-s="": Force the docker runtime to use a specific storage driver
|
||||
-v=false: Print version information and quit
|
||||
|
||||
The docker daemon is the persistent process that manages containers. Docker uses the same binary for both the
|
||||
The Docker daemon is the persistent process that manages containers. Docker uses the same binary for both the
|
||||
daemon and client. To run the daemon you provide the ``-d`` flag.
|
||||
|
||||
To force docker to use devicemapper as the storage driver, use ``docker -d -s devicemapper``
|
||||
To force Docker to use devicemapper as the storage driver, use ``docker -d -s devicemapper``.
|
||||
|
||||
To set the dns server for all docker containers, use ``docker -d -dns 8.8.8.8``
|
||||
To set the DNS server for all Docker containers, use ``docker -d -dns 8.8.8.8``.
|
||||
|
||||
To run the daemon with debug output, use ``docker -d -D``.
|
||||
|
||||
The docker client will also honor the ``DOCKER_HOST`` environment variable to set
|
||||
the ``-H`` flag for the client.
|
||||
|
||||
::
|
||||
|
||||
docker -H tcp://0.0.0.0:4243 ps
|
||||
# or
|
||||
export DOCKER_HOST="tcp://0.0.0.0:4243"
|
||||
docker ps
|
||||
# both are equal
|
||||
|
||||
To run the daemon with debug output, use ``docker -d -D``
|
||||
|
||||
.. _cli_attach:
|
||||
|
||||
@@ -68,11 +81,11 @@ To run the daemon with debug output, use ``docker -d -D``
|
||||
You can detach from the container again (and leave it running) with
|
||||
``CTRL-c`` (for a quiet exit) or ``CTRL-\`` to get a stacktrace of
|
||||
the Docker client when it quits. When you detach from the container's
|
||||
process the exit code will be retuned to the client.
|
||||
process the exit code will be returned to the client.
|
||||
|
||||
To stop a container, use ``docker stop``
|
||||
To stop a container, use ``docker stop``.
|
||||
|
||||
To kill the container, use ``docker kill``
|
||||
To kill the container, use ``docker kill``.
|
||||
|
||||
.. _cli_attach_examples:
|
||||
|
||||
@@ -128,12 +141,11 @@ Examples:
|
||||
-no-cache: Do not use the cache when building the image.
|
||||
-rm: Remove intermediate containers after a successful build
|
||||
|
||||
The files at PATH or URL are called the "context" of the build. The
|
||||
build process may refer to any of the files in the context, for
|
||||
example when using an :ref:`ADD <dockerfile_add>` instruction. When a
|
||||
single ``Dockerfile`` is given as URL, then no context is set. When a
|
||||
git repository is set as URL, then the repository is used as the
|
||||
context
|
||||
The files at ``PATH`` or ``URL`` are called the "context" of the build. The
|
||||
build process may refer to any of the files in the context, for example when
|
||||
using an :ref:`ADD <dockerfile_add>` instruction. When a single ``Dockerfile``
|
||||
is given as ``URL``, then no context is set. When a Git repository is set as
|
||||
``URL``, then the repository is used as the context
|
||||
|
||||
.. _cli_build_examples:
|
||||
|
||||
@@ -168,13 +180,13 @@ Examples:
|
||||
---> f52f38b7823e
|
||||
Successfully built f52f38b7823e
|
||||
|
||||
This example specifies that the PATH is ``.``, and so all the files in
|
||||
the local directory get tar'd and sent to the Docker daemon. The PATH
|
||||
This example specifies that the ``PATH`` is ``.``, and so all the files in
|
||||
the local directory get tar'd and sent to the Docker daemon. The ``PATH``
|
||||
specifies where to find the files for the "context" of the build on
|
||||
the Docker daemon. Remember that the daemon could be running on a
|
||||
remote machine and that no parsing of the Dockerfile happens at the
|
||||
remote machine and that no parsing of the ``Dockerfile`` happens at the
|
||||
client side (where you're running ``docker build``). That means that
|
||||
*all* the files at PATH get sent, not just the ones listed to
|
||||
*all* the files at ``PATH`` get sent, not just the ones listed to
|
||||
:ref:`ADD <dockerfile_add>` in the ``Dockerfile``.
|
||||
|
||||
The transfer of context from the local machine to the Docker daemon is
|
||||
@@ -197,16 +209,16 @@ tag will be ``2.0``
|
||||
|
||||
This will read a ``Dockerfile`` from *stdin* without context. Due to
|
||||
the lack of a context, no contents of any local directory will be sent
|
||||
to the ``docker`` daemon. Since there is no context, a Dockerfile
|
||||
to the ``docker`` daemon. Since there is no context, a ``Dockerfile``
|
||||
``ADD`` only works if it refers to a remote URL.

.. code-block:: bash

    $ sudo docker build github.com/creack/docker-firefox

This will clone the Github repository and use the cloned repository as
This will clone the GitHub repository and use the cloned repository as
context. The ``Dockerfile`` at the root of the repository is used as
``Dockerfile``. Note that you can specify an arbitrary git repository
``Dockerfile``. Note that you can specify an arbitrary Git repository
by using the ``git://`` schema.

@@ -247,7 +259,7 @@ Change the command that a container runs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Sometimes you have an application container running just a service and you need
to make a quick change (run bash?) and then change it back.
to make a quick change and then change it back.

In this example, we run a container with ``ls`` and then change the image to
run ``ls /etc``.

@@ -270,9 +282,9 @@ Full -run example

The ``-run`` JSON hash changes the ``Config`` section when running ``docker inspect CONTAINERID``
or ``config`` when running ``docker inspect IMAGEID``.

(multiline is ok within a single quote ``'``)
(Multiline is okay within a single quote ``'``)

::
.. code-block:: bash

    $ sudo docker commit -run='
    {

@@ -315,7 +327,7 @@ or ``config`` when running ``docker inspect IMAGEID``.

Copy files/folders from the containers filesystem to the host
path. Paths are relative to the root of the filesystem.

.. code-block:: bash

    $ sudo docker cp 7bb0e258aefe:/etc/debian_version .

@@ -329,7 +341,7 @@ or ``config`` when running ``docker inspect IMAGEID``.

::

    Usage: docker diff CONTAINER

    List the changed files and directories in a container's filesystem

There are 3 events that are listed in the 'diff':

@@ -338,7 +350,7 @@ There are 3 events that are listed in the 'diff':

2. ```D``` - Delete
3. ```C``` - Change

for example:
For example:

.. code-block:: bash

@@ -366,7 +378,7 @@ for example:

    Usage: docker events

    Get real time events from the server

    -since="": Show previously created events and then stream.
    (either seconds since epoch, or date string as below)

@@ -429,8 +441,8 @@ Show events in the past from a specified time

    Usage: docker export CONTAINER

    Export the contents of a filesystem as a tar archive to STDOUT

for example:
For example:

.. code-block:: bash

@@ -450,7 +462,7 @@ for example:

    -notrunc=false: Don't truncate output
    -q=false: only show numeric IDs

To see how the docker:latest image was built:
To see how the ``docker:latest`` image was built:

.. code-block:: bash

@@ -482,7 +494,7 @@ To see how the docker:latest image was built:

    d5e85dc5b1d8        2 weeks ago         /bin/sh -c apt-get update
    13e642467c11        2 weeks ago         /bin/sh -c echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
    ae6dde92a94e        2 weeks ago         /bin/sh -c #(nop) MAINTAINER Solomon Hykes <solomon@dotcloud.com>
    ubuntu:12.04        6 months ago

.. _cli_images:

@@ -500,7 +512,7 @@ To see how the docker:latest image was built:

    -q=false: only show numeric IDs
    -tree=false: output graph in tree format
    -viz=false: output graph in graphviz format

Listing the most recently created images
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -553,15 +565,15 @@ Displaying image hierarchy

    $ sudo docker images -tree

    |─8dbd9e392a96 Size: 131.5 MB (virtual 131.5 MB) Tags: ubuntu:12.04,ubuntu:latest,ubuntu:precise
    ├─8dbd9e392a96 Size: 131.5 MB (virtual 131.5 MB) Tags: ubuntu:12.04,ubuntu:latest,ubuntu:precise
    └─27cf78414709 Size: 180.1 MB (virtual 180.1 MB)
      └─b750fe79269d Size: 24.65 kB (virtual 180.1 MB) Tags: ubuntu:12.10,ubuntu:quantal
        |─f98de3b610d5 Size: 12.29 kB (virtual 180.1 MB)
        | └─7da80deb7dbf Size: 16.38 kB (virtual 180.1 MB)
        |   └─65ed2fee0a34 Size: 20.66 kB (virtual 180.2 MB)
        |     └─a2b9ea53dddc Size: 819.7 MB (virtual 999.8 MB)
        |       └─a29b932eaba8 Size: 28.67 kB (virtual 999.9 MB)
        |         └─e270a44f124d Size: 12.29 kB (virtual 999.9 MB) Tags: progrium/buildstep:latest
        ├─f98de3b610d5 Size: 12.29 kB (virtual 180.1 MB)
        │ └─7da80deb7dbf Size: 16.38 kB (virtual 180.1 MB)
        │   └─65ed2fee0a34 Size: 20.66 kB (virtual 180.2 MB)
        │     └─a2b9ea53dddc Size: 819.7 MB (virtual 999.8 MB)
        │       └─a29b932eaba8 Size: 28.67 kB (virtual 999.9 MB)
        │         └─e270a44f124d Size: 12.29 kB (virtual 999.9 MB) Tags: progrium/buildstep:latest
        └─17e74ac162d8 Size: 53.93 kB (virtual 180.2 MB)
          └─339a3f56b760 Size: 24.65 kB (virtual 180.2 MB)
            └─904fcc40e34d Size: 96.7 MB (virtual 276.9 MB)

@@ -588,10 +600,9 @@ Displaying image hierarchy

(.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.

At this time, the URL must start with ``http`` and point to a single
file archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) containing a
file archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, or .txz) containing a
root filesystem. If you would like to import from a local directory or
archive, you can use the ``-`` parameter to take the data from
standard in.
archive, you can use the ``-`` parameter to take the data from *stdin*.

Examples
~~~~~~~~

@@ -601,24 +612,30 @@ Import from a remote location

This will create a new untagged image.

``$ sudo docker import http://example.com/exampleimage.tgz``
.. code-block:: bash

    $ sudo docker import http://example.com/exampleimage.tgz

Import from a local file
........................

Import to docker via pipe and standard in
Import to docker via pipe and *stdin*.

``$ cat exampleimage.tgz | sudo docker import - exampleimagelocal:new``
.. code-block:: bash

    $ cat exampleimage.tgz | sudo docker import - exampleimagelocal:new

Import from a local directory
.............................

``$ sudo tar -c . | docker import - exampleimagedir``
.. code-block:: bash

Note the ``sudo`` in this example -- you must preserve the ownership
of the files (especially root ownership) during the archiving with
tar. If you are not root (or sudo) when you tar, then the ownerships
might not get preserved.
    $ sudo tar -c . | docker import - exampleimagedir

Note the ``sudo`` in this example -- you must preserve the ownership of the
files (especially root ownership) during the archiving with tar. If you are not
root (or the sudo command) when you tar, then the ownerships might not get
preserved.

.. _cli_info:

@@ -657,16 +674,16 @@ might not get preserved.

    Insert a file from URL in the IMAGE at PATH

Use the specified IMAGE as the parent for a new image which adds a
:ref:`layer <layer_def>` containing the new file. ``insert`` does not modify
the original image, and the new image has the contents of the parent image,
plus the new file.
Use the specified ``IMAGE`` as the parent for a new image which adds a
:ref:`layer <layer_def>` containing the new file. The ``insert`` command does
not modify the original image, and the new image has the contents of the parent
image, plus the new file.

Examples
~~~~~~~~

Insert file from github
Insert file from GitHub
.......................

.. code-block:: bash

@@ -681,16 +698,16 @@ Insert file from github

::

    Usage: docker inspect [OPTIONS] CONTAINER
    Usage: docker inspect CONTAINER|IMAGE [CONTAINER|IMAGE...]

    Return low-level information on a container
    Return low-level information on a container/image

    -format="": template to output results
    -format="": Format the output using the given go template.

By default, this will render all results in a JSON array. If a format
is specified, the given template will be executed for each result.

Go's `text/template <http://golang.org/pkg/text/template/>` package
Go's `text/template <http://golang.org/pkg/text/template/>`_ package
describes all the details of the format.
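
As a quick sketch of such a template (``.NetworkSettings.IPAddress`` is one
commonly queried field; the container ID is a placeholder):

.. code-block:: bash

    $ sudo docker inspect -format='{{.NetworkSettings.IPAddress}}' $CONTAINER_ID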

Examples

@@ -795,14 +812,14 @@ Known Issues (kill)

    Fetch the logs of a container

``docker logs`` is a convenience which batch-retrieves whatever logs
are present at the time of execution. This does not guarantee
execution order when combined with a ``docker run`` (i.e. your run may
not have generated any logs at the time you execute ``docker logs``).
The ``docker logs`` command is a convenience which batch-retrieves whatever
logs are present at the time of execution. This does not guarantee execution
order when combined with a ``docker run`` (i.e. your run may not have generated
any logs at the time you execute ``docker logs``).

``docker logs -f`` combines ``docker logs`` and ``docker attach``: it
will first return all logs from the beginning and then continue
streaming new output from the container's stdout and stderr.
The ``docker logs -f`` command combines ``docker logs`` and ``docker attach``:
it will first return all logs from the beginning and then continue streaming
new output from the container's stdout and stderr.
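
For instance (the container ID is a placeholder):

.. code-block:: bash

    $ sudo docker logs -f $CONTAINER_ID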

.. _cli_port:

@@ -940,7 +957,7 @@ Removing tagged images
~~~~~~~~~~~~~~~~~~~~~~

Images can be removed either by their short or long IDs, or their image names.
If an image has more than one name, each of them needs to be removed before the
image is removed.

.. code-block:: bash

@@ -952,7 +969,7 @@ image is removed.

    test2                   latest              fd484f19954f        23 seconds ago      7 B (virtual 4.964 MB)

    $ sudo docker rmi fd484f19954f
    Error: Conflict, fd484f19954f wasn't deleted
    Error: Conflict, cannot delete image fd484f19954f because it is tagged in multiple repositories
    2013/12/11 05:47:16 Error: failed to remove one or more images

    $ sudo docker rmi test1

@@ -1004,13 +1021,14 @@ image is removed.

    -link="": Add link to another container (name:alias)
    -name="": Assign the specified name to the container. If no name is specific docker will generate a random name
    -P=false: Publish all exposed ports to the host interfaces

``'docker run'`` first ``'creates'`` a writeable container layer over
the specified image, and then ``'starts'`` it using the specified
command. That is, ``'docker run'`` is equivalent to the API
``/containers/create`` then ``/containers/(id)/start``.

``docker run`` can be used in combination with ``docker commit`` to :ref:`change the command that a container runs <cli_commit_examples>`.
The ``docker run`` command first ``creates`` a writeable container layer over
the specified image, and then ``starts`` it using the specified command. That
is, ``docker run`` is equivalent to the API ``/containers/create`` then
``/containers/(id)/start``.

The ``docker run`` command can be used in combination with ``docker commit`` to
:ref:`change the command that a container runs <cli_commit_examples>`.
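
A hedged sketch of those two underlying API calls over HTTP (this assumes a
daemon bound to TCP port ``4243`` as discussed in the Basics documentation;
the JSON body is a minimal illustrative config, not the full format):

.. code-block:: bash

    $ curl -X POST -H 'Content-Type: application/json' \
        -d '{"Image": "ubuntu", "Cmd": ["echo", "test"]}' \
        http://127.0.0.1:4243/containers/create
    $ curl -X POST http://127.0.0.1:4243/containers/$CONTAINER_ID/start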

Known Issues (run -volumes-from)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -1026,10 +1044,10 @@ Examples:

    $ sudo docker run -cidfile /tmp/docker_test.cid ubuntu echo "test"

This will create a container and print "test" to the console. The
``cidfile`` flag makes docker attempt to create a new file and write the
container ID to it. If the file exists already, docker will return an
error. Docker will close this file when docker run exits.
This will create a container and print ``test`` to the console. The
``cidfile`` flag makes Docker attempt to create a new file and write the
container ID to it. If the file exists already, Docker will return an
error. Docker will close this file when ``docker run`` exits.

.. code-block:: bash

@@ -1063,7 +1081,7 @@ use-cases, like running Docker within Docker.

    $ sudo docker run -w /path/to/dir/ -i -t ubuntu pwd

The ``-w`` option runs the command inside the given directory,
here /path/to/dir/. If the path does not exists it is created inside the
here ``/path/to/dir/``. If the path does not exist, it is created inside the
container.

.. code-block:: bash

@@ -1080,7 +1098,7 @@ using the container, but inside the current working directory.

    $ sudo docker run -p 127.0.0.1:80:8080 ubuntu bash

This binds port ``8080`` of the container to port ``80`` on 127.0.0.1 of the
This binds port ``8080`` of the container to port ``80`` on ``127.0.0.1`` of the
host machine. :ref:`port_redirection` explains in detail how to manipulate ports
in Docker.

@@ -1114,11 +1132,31 @@ to the newly created container.

    $ sudo docker run -volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd

The ``-volumes-from`` flag mounts all the defined volumes from the
refrence containers. Containers can be specified by a comma seperated
referenced containers. Containers can be specified by a comma separated
list or by repetitions of the ``-volumes-from`` argument. The container
id may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes in
ID may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes in
read-only or read-write mode, respectively. By default, the volumes are mounted
in the same mode (rw or ro) as the reference container.
in the same mode (read write or read only) as the reference container.

A complete example
..................

.. code-block:: bash

    $ sudo docker run -d -name static static-web-files sh
    $ sudo docker run -d -expose=8098 -name riak riakserver
    $ sudo docker run -d -m 100m -e DEVELOPMENT=1 -e BRANCH=example-code -v $(pwd):/app/bin:ro -name app appserver
    $ sudo docker run -d -p 1443:443 -dns=dns.dev.org -v /var/log/httpd -volumes-from static -link riak -link app -h www.sven.dev.org -name web webserver
    $ sudo docker run -t -i -rm -volumes-from web -w /var/log/httpd busybox tail -f access.log

This example shows 5 containers that might be set up to test a web application change:

1. Start a pre-prepared volume image ``static-web-files`` (in the background) that has CSS, image and static HTML in it (with a ``VOLUME`` instruction in the ``Dockerfile`` to allow the web server to use those files);
2. Start a pre-prepared ``riakserver`` image, give the container name ``riak`` and expose port ``8098`` to any containers that link to it;
3. Start the ``appserver`` image, restricting its memory usage to 100MB, setting two environment variables ``DEVELOPMENT`` and ``BRANCH`` and bind-mounting the current directory (``$(pwd)``) in the container in read-only mode as ``/app/bin``;
4. Start the ``webserver``, mapping port ``443`` in the container to port ``1443`` on the Docker server, setting the DNS server to ``dns.dev.org``, creating a volume to put the log files into (so we can access it from another container), then importing the files from the volume exposed by the ``static`` container, and linking to all exposed ports from ``riak`` and ``app``. Lastly, we set the hostname to ``www.sven.dev.org`` so it's consistent with the pre-generated SSL certificate;
5. Finally, we create a container that runs ``tail -f access.log`` using the logs volume from the ``web`` container, setting the workdir to ``/var/log/httpd``. The ``-rm`` option means that when the container exits, the container's layer is removed.

.. _cli_save:

@@ -1205,7 +1243,7 @@ The main process inside the container will receive SIGTERM, and after a grace pe

``version``
-----------

Show the version of the docker client, daemon, and latest released version.
Show the version of the Docker client, daemon, and latest released version.

.. _cli_wait:

@@ -136,7 +136,7 @@ You can run an interactive session in the newly built container:

Extra Step: Build and view the Documentation
-------------------------------------------
--------------------------------------------

If you want to read the documentation from a local website, or are making changes
to it, you can build the documentation and then serve it by:

@@ -94,5 +94,13 @@ The password is ``screencast``.

    $ ifconfig
    $ ssh root@192.168.33.10 -p 49154
    # Thanks for watching, Thatcher thatcher@dotcloud.com

Update:
-------

For Ubuntu 13.10 using stackbrew/ubuntu, you may need to do these additional steps (a hedged command sketch follows the list):

1. In ``/etc/pam.d/sshd``, change the ``pam_loginuid`` line from 'required' to 'optional'
2. echo LANG=\"en_US.UTF-8\" > /etc/default/locale
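
Run inside the container as root; the ``sed`` expression for step 1 is an
assumption -- any editor works:

.. code-block:: bash

    sed -i 's/^session\s*required\s*pam_loginuid.so/session optional pam_loginuid.so/' /etc/pam.d/sshd
    echo LANG=\"en_US.UTF-8\" > /etc/default/locale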

@@ -111,7 +111,7 @@ What does Docker add to just plain LXC?

registry to store and transfer private containers, for internal
server deployments for example.

* *Tool ecosystem.*
  Docker defines an API for automating and customizing the
  creation and deployment of containers. There are a huge number
  of tools integrating with Docker to extend its

@@ -122,6 +122,11 @@ What does Docker add to just plain LXC?

(Jenkins, Strider, Travis), etc. Docker is rapidly establishing
itself as the standard for container-based tooling.

What is different between a Docker container and a VM?
......................................................

There's a great StackOverflow answer `showing the differences <http://stackoverflow.com/questions/16047306/how-is-docker-io-different-from-a-normal-virtual-machine>`_.

Do I lose my data when the container exits?
...........................................

@@ -129,6 +134,53 @@ Not at all! Any data that your application writes to disk gets preserved

in its container until you explicitly delete the container. The file
system for the container persists even after the container halts.

How far do Docker containers scale?
...................................

Some of the largest server farms in the world today are based on containers.
Large web deployments like Google and Twitter, and platform providers such as
Heroku and dotCloud all run on container technology, at a scale of hundreds of
thousands or even millions of containers running in parallel.

How do I connect Docker containers?
...................................

Currently the recommended way to link containers is via the `link` primitive.
You can see details of how to `work with links here
<http://docs.docker.io/en/latest/use/working_with_links_names/>`_.
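
For instance, a minimal linking sketch (``crosbymichael/redis`` is an assumed
example image; any image that exposes a port will do):

.. code-block:: bash

    $ sudo docker run -d -name redis crosbymichael/redis
    $ sudo docker run -t -i -link redis:db ubuntu bash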

Also useful when enabling more flexible service portability is the
`Ambassador linking pattern
<http://docs.docker.io/en/latest/use/ambassador_pattern_linking/>`_.

How do I run more than one process in a Docker container?
.........................................................

Any capable process supervisor such as http://supervisord.org/, runit, s6, or
daemontools can do the trick. Docker will start up the process management
daemon which will then fork to run additional processes. As long as the
process manager daemon continues to run, the container will continue to run as
well. You can see a more substantial example `that uses supervisord here
<http://docs.docker.io/en/latest/examples/using_supervisord/>`_.
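
A minimal ``Dockerfile`` sketch of the idea (this is not the linked example
itself; the ``supervisord.conf`` listing the managed programs is assumed to
exist next to the ``Dockerfile``)::

    FROM ubuntu
    RUN apt-get update && apt-get install -y supervisor
    ADD supervisord.conf /etc/supervisor/conf.d/supervisord.conf
    # run the supervisor in the foreground so the container keeps running
    CMD ["/usr/bin/supervisord", "-n"]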

What platforms does Docker run on?
..................................

Linux:

- Ubuntu 12.04, 13.04 et al
- Fedora 19/20+
- RHEL 6.5+
- CentOS 6+
- Gentoo
- ArchLinux

Cloud:

- Amazon EC2
- Google Compute Engine
- Rackspace

Can I help by adding some questions and answers?
................................................

@@ -25,7 +25,7 @@ currently in active development, so this documentation will change

frequently.

For an overview of Docker, please see the `Introduction
<http://www.docker.io>`_. When you're ready to start working with
<http://www.docker.io/learn_more/>`_. When you're ready to start working with
Docker, we have a `quick start <http://www.docker.io/gettingstarted>`_
and a more in-depth guide to :ref:`ubuntu_linux` and other
:ref:`installation_list` paths including prebuilt binaries,

@@ -26,18 +26,13 @@ Amazon QuickStart

<https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:>`_ menu
on your AWS Console.

* When picking the source AMI for your instance type, select "Community
  AMIs".
* Click the ``Select`` button for a 64Bit Ubuntu image. For example: Ubuntu Server 12.04.3 LTS

* Search for ``amd64 precise``. Pick one of the amd64 Ubuntu images.

* If you choose a EBS enabled AMI, you'll also be able to launch a
* For testing you can use the default (possibly free)
  ``t1.micro`` instance (more info on `pricing
  <http://aws.amazon.com/en/ec2/pricing/>`_). ``t1.micro`` instances are
  eligible for Amazon's Free Usage Tier.
  <http://aws.amazon.com/en/ec2/pricing/>`_).

* When you click select you'll be taken to the instance setup, and you're one
  click away from having your Ubuntu VM up and running.
* Click the ``Next: Configure Instance Details`` button at the bottom right.

2. **Tell CloudInit to install Docker:**

@@ -21,6 +21,11 @@ Check Your Kernel

Your host's Linux kernel must meet the Docker :ref:`kernel`

Check for User Space Tools
--------------------------

You must have a working installation of the `lxc <http://linuxcontainers.org>`_ utilities and library.

Get the docker binary:
----------------------

@@ -1,6 +1,6 @@

:title: Requirements and Installation on Fedora
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: Docker, Docker documentation, fedora, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux
:keywords: Docker, Docker documentation, Fedora, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux

.. _fedora:

@@ -18,13 +18,34 @@ architecture.

Installation
------------

The ``docker-io`` package provides Docker on Fedora.

If you have the (unrelated) ``docker`` package installed already, it will
conflict with ``docker-io``. There's a `bug report`_ filed for it.
To proceed with ``docker-io`` installation on Fedora 19, please remove
``docker`` first.

.. code-block:: bash

    sudo yum -y remove docker

For Fedora 20 and later, the ``wmdocker`` package will provide the same
functionality as ``docker`` and will also not conflict with ``docker-io``.

.. code-block:: bash

    sudo yum -y install wmdocker
    sudo yum -y remove docker

Install the ``docker-io`` package which will install Docker on our host.

.. code-block:: bash

    sudo yum -y install docker-io

To update the ``docker-io`` package
To update the ``docker-io`` package:

.. code-block:: bash

@@ -46,7 +67,9 @@ Now let's verify that Docker is working.

.. code-block:: bash

    sudo docker run -i -t mattdm/fedora /bin/bash
    sudo docker run -i -t fedora /bin/bash

**Done!** Now continue with the :ref:`hello_world` example.

.. _bug report: https://bugzilla.redhat.com/show_bug.cgi?id=1043676
80 docs/sources/installation/frugalware.rst (new file)
@@ -0,0 +1,80 @@

:title: Installation on FrugalWare
:description: Docker installation on FrugalWare.
:keywords: frugalware linux, virtualization, docker, documentation, installation

.. _frugalware:

FrugalWare
==========

.. include:: install_header.inc

.. include:: install_unofficial.inc

Installing on FrugalWare is handled via the official packages:

* `lxc-docker i686 <http://www.frugalware.org/packages/200141>`_

* `lxc-docker x86_64 <http://www.frugalware.org/packages/200130>`_

The `lxc-docker` package will install the latest tagged version of Docker.

Dependencies
------------

Docker depends on several packages which are specified as dependencies in
the packages. The core dependencies are:

* systemd
* lvm2
* sqlite3
* libguestfs
* lxc
* iproute2
* bridge-utils

Installation
------------

A simple

::

    pacman -S lxc-docker

is all that is needed.

Starting Docker
---------------

There is a systemd service unit created for Docker. To start Docker as a service:

::

    sudo systemctl start lxc-docker

To start on system boot:

::

    sudo systemctl enable lxc-docker

Network Configuration
---------------------

IPv4 packet forwarding is disabled by default on FrugalWare, so Internet access from inside
the container may not work.

To enable packet forwarding, run the following command as the ``root`` user on the host system:

::

    sysctl net.ipv4.ip_forward=1

And, to make it persistent across reboots, add the following to a file named **/etc/sysctl.d/docker.conf**:

::

    net.ipv4.ip_forward=1

@@ -57,9 +57,18 @@

    docker-playground:~$ curl get.docker.io | bash
    docker-playground:~$ sudo update-rc.d docker defaults

7. Start a new container:
7. If running in zones: ``us-central1-a``, ``europe-west1-1``, and ``europe-west1-b``, the docker daemon must be started with the ``-mtu`` flag. Without the flag, you may experience intermittent network pauses.
   `See this issue <https://code.google.com/p/google-compute-engine/issues/detail?id=57>`_ for more details.

   .. code-block:: bash

      docker-playground:~$ echo "DOCKER_OPTS=\"$DOCKER_OPTS -mtu 1460\"" | sudo tee -a /etc/defaults/docker
      docker-playground:~$ sudo service docker restart

8. Start a new container:

   .. code-block:: bash

      docker-playground:~$ sudo docker run busybox echo 'docker on GCE \o/'
      docker on GCE \o/

@@ -22,6 +22,7 @@ Contents:

    fedora
    archlinux
    gentoolinux
    frugalware
    vagrant
    windows
    amazon

@@ -115,6 +115,8 @@ Then run ``update-grub``, and reboot.

Details
-------

To automatically check some of the requirements below, you can run `lxc-checkconfig`.
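
For instance (output varies by kernel configuration):

.. code-block:: bash

    $ lxc-checkconfig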

Networking:

- CONFIG_BRIDGE

@@ -28,6 +28,15 @@ Installation

Firstly, you need to install the EPEL repository. Please follow the `EPEL installation instructions`_.

The ``docker-io`` package provides Docker on EPEL.

If you already have the (unrelated) ``docker`` package installed, it will
conflict with ``docker-io``. There's a `bug report`_ filed for it.
To proceed with ``docker-io`` installation, please remove
``docker`` first.

Next, let's install the ``docker-io`` package which will install Docker on our host.

.. code-block:: bash

@@ -56,7 +65,7 @@ Now let's verify that Docker is working.

.. code-block:: bash

    sudo docker run -i -t mattdm/fedora /bin/bash
    sudo docker run -i -t fedora /bin/bash

**Done!** Now continue with the :ref:`hello_world` example.

@@ -68,4 +77,5 @@ If you have any issues - please report them directly in the `Red Hat Bugzilla fo

.. _Extra Packages for Enterprise Linux (EPEL): https://fedoraproject.org/wiki/EPEL
.. _EPEL installation instructions: https://fedoraproject.org/wiki/EPEL#How_can_I_use_these_extra_packages.3F
.. _Red Hat Bugzilla for docker-io component : https://bugzilla.redhat.com/enter_bug.cgi?product=Fedora%20EPEL&component=docker-io
.. _bug report: https://bugzilla.redhat.com/show_bug.cgi?id=1043676

@@ -17,7 +17,7 @@ Ubuntu

Docker is supported on the following versions of Ubuntu:

- :ref:`ubuntu_precise`
- :ref:`ubuntu_raring`
- :ref:`ubuntu_raring_saucy`

Please read :ref:`ufw`, if you plan to use `UFW (Uncomplicated
Firewall) <https://help.ubuntu.com/community/UFW>`_

@@ -108,10 +108,12 @@ Type ``exit`` to exit

**Done!** Now continue with the :ref:`hello_world` example.

.. _ubuntu_raring:
.. _ubuntu_raring_saucy:

Ubuntu Raring 13.04 (64 bit)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Ubuntu Raring 13.04 and Saucy 13.10 (64 bit)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

These instructions cover both Ubuntu Raring 13.04 and Saucy 13.10.

Dependencies
------------

@@ -169,7 +171,6 @@ Type ``exit`` to exit

**Done!** Now continue with the :ref:`hello_world` example.

.. _ufw:

Docker and UFW

@@ -9,7 +9,7 @@ Learn Basic Commands

Starting Docker
---------------

If you have used one of the quick install paths', Docker may have been
If you have used one of the quick install paths, Docker may have been
installed with upstart, Ubuntu's system for starting processes at boot
time. You should be able to run ``sudo docker help`` and get output.

@@ -30,8 +30,8 @@ Download a pre-built image

    # Download an ubuntu image
    sudo docker pull ubuntu

This will find the ``ubuntu`` image by name in the :ref:`Central Index
<searching_central_index>` and download it from the top-level Central
Repository to a local image cache.

.. NOTE:: When the image has successfully downloaded, you will see a

@@ -53,21 +53,23 @@ Running an interactive shell

.. _dockergroup:

sudo and the docker Group
-------------------------
The sudo command and the docker Group
-------------------------------------

The ``docker`` daemon always runs as root, and since ``docker``
version 0.5.2, ``docker`` binds to a Unix socket instead of a TCP
port. By default that Unix socket is owned by the user *root*, and so,
by default, you can access it with ``sudo``.
The ``docker`` daemon always runs as the root user, and since Docker version
0.5.2, the ``docker`` daemon binds to a Unix socket instead of a TCP port. By
default that Unix socket is owned by the user *root*, and so, by default, you
can access it with ``sudo``.

Starting in version 0.5.3, if you (or your Docker installer) create a
Unix group called *docker* and add users to it, then the ``docker``
daemon will make the ownership of the Unix socket read/writable by the
*docker* group when the daemon starts. The ``docker`` daemon must
always run as root, but if you run the ``docker`` client as a user in
always run as the root user, but if you run the ``docker`` client as a user in
the *docker* group then you don't need to add ``sudo`` to all the
client commands. Warning: the *docker* group is root-equivalent.
client commands.

.. warning:: The *docker* group is root-equivalent.

**Example:**

@@ -97,10 +99,10 @@ Bind Docker to another host/port or a Unix socket

<https://github.com/dotcloud/docker/issues/1369>`_). Make sure you
control access to ``docker``.

With -H it is possible to make the Docker daemon to listen on a
specific ip and port. By default, it will listen on
With ``-H`` it is possible to make the Docker daemon listen on a
specific IP and port. By default, it will listen on
``unix:///var/run/docker.sock`` to allow only local connections by the
*root* user. You *could* set it to 0.0.0.0:4243 or a specific host ip to
*root* user. You *could* set it to ``0.0.0.0:4243`` or a specific host IP to
give access to everybody, but that is **not recommended** because then
it is trivial for someone to gain root access to the host where the
daemon is running.

@@ -115,6 +117,11 @@ For example:

* ``tcp://host:4243`` -> tcp connection on host:4243
* ``unix://path/to/socket`` -> unix socket located at ``path/to/socket``

``-H``, when empty, will default to the same value as when no ``-H`` was passed in.

``-H`` also accepts short form for TCP bindings:
``host[:port]`` or ``:port``

.. code-block:: bash

    # Run docker in daemon mode

@@ -179,10 +186,10 @@ Committing (saving) a container state

Save your container's state to a container image, so the state can be re-used.

When you commit your container only the differences between the image
the container was created from and the current state of the container
will be stored (as a diff). See which images you already have using
``sudo docker images``
When you commit your container only the differences between the image the
container was created from and the current state of the container will be
stored (as a diff). See which images you already have using the ``docker
images`` command.

.. code-block:: bash

@@ -194,7 +201,5 @@ will be stored (as a diff). See which images you already have using

You now have an image state from which you can create new instances.

Read more about :ref:`working_with_the_repository` or continue to the
complete :ref:`cli`

@@ -251,6 +251,11 @@ All new files and directories are created with mode 0755, uid and gid

if you build using STDIN (``docker build - < somefile``), there is no build
context, so the Dockerfile can only contain a URL-based ADD statement.

.. note::
   if your URL files are protected using authentication, you will need to use
   ``RUN wget``, ``RUN curl``, or another tool from within the container, as
   ADD does not support authentication (a hypothetical sketch follows).
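
A hypothetical sketch of that workaround (the URL and credentials are
placeholders; ``wget``'s ``--user``/``--password`` flags do the
authentication that ``ADD`` cannot)::

    RUN wget --user=myuser --password=mypassword http://example.com/private/archive.tar.gz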

The copy obeys the following rules:

* The ``<src>`` path must be inside the *context* of the build; you cannot

@@ -82,7 +82,7 @@ In this scenario:

    $ sudo ifconfig bridge0 192.168.227.1 netmask 255.255.255.0

    # Edit your Docker startup file
    $ echo "DOCKER_OPTS=\"-b=bridge0\"" /etc/default/docker
    $ echo "DOCKER_OPTS=\"-b=bridge0\"" >> /etc/default/docker

    # Start Docker
    $ sudo service docker start

@@ -31,7 +31,7 @@ container, Docker provide ways to bind the container port to an

interface of the host system. To simplify communication between
containers, Docker provides the linking mechanism.

Binding a port to an host interface
Binding a port to a host interface
-----------------------------------

To bind a port of the container to a specific interface of the host

@@ -13,7 +13,7 @@ Share Directories via Volumes

A *data volume* is a specially-designated directory within one or more
containers that bypasses the :ref:`ufs_def` to provide several useful
features for persistant or shared data:
features for persistent or shared data:

* **Data volumes can be shared and reused between containers.** This
  is the feature that makes data volumes so powerful. You can use it

@@ -30,35 +30,58 @@ Each container can have zero or more data volumes.

Getting Started
...............

Using data volumes is as simple as adding a new flag: ``-v``. The
parameter ``-v`` can be used more than once in order to create more
volumes within the new container. The example below shows the
instruction to create a container with two new volumes::
Using data volumes is as simple as adding a ``-v`` parameter to the ``docker run``
command. The ``-v`` parameter can be used more than once in order to
create more volumes within the new container. To create a new container with
two new volumes::

    docker run -v /var/volume1 -v /var/volume2 shykes/couchdb
    $ docker run -v /var/volume1 -v /var/volume2 busybox true

For a Dockerfile, the VOLUME instruction will add one or more new
volumes to any container created from the image::
This command will create the new container with two new volumes that
exits instantly (``true`` is pretty much the smallest, simplest program
that you can run). Once created you can mount its volumes in any other
container using the ``-volumes-from`` option, irrespective of whether the
container is running or not.

    VOLUME ["/var/volume1", "/var/volume2"]
Or, you can use the VOLUME instruction in a Dockerfile to add one or more new
volumes to any container created from that image::

    # BUILD-USING:        docker build -t data .
    # RUN-USING:          docker run -name DATA data
    FROM          busybox
    VOLUME        ["/var/volume1", "/var/volume2"]
    CMD           ["/usr/bin/true"]

Mount Volumes from an Existing Container:
-----------------------------------------
Creating and mounting a Data Volume Container
---------------------------------------------

The command below creates a new container which is running as daemon
``-d`` and with one volume ``/var/lib/couchdb``::
If you have some persistent data that you want to share between containers,
or want to use from non-persistent containers, it's best to create a named
Data Volume Container, and then to mount the data from it.

    COUCH1=$(sudo docker run -d -v /var/lib/couchdb shykes/couchdb:2013-05-03)
Create a named container with volumes to share (``/var/volume1`` and ``/var/volume2``)::

From the container id of that previous container ``$COUCH1`` it's
possible to create new container sharing the same volume using the
parameter ``-volumes-from container_id``::
    $ docker run -v /var/volume1 -v /var/volume2 -name DATA busybox true

    COUCH2=$(sudo docker run -d -volumes-from $COUCH1 shykes/couchdb:2013-05-03)
Then mount those data volumes into your application containers::

Now, the second container has the all the information from the first volume.
    $ docker run -t -i -rm -volumes-from DATA -name client1 ubuntu bash

You can use multiple ``-volumes-from`` parameters to bring together multiple
data volumes from multiple containers.
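
For instance (``DATA2`` stands for a hypothetical second volume container,
created the same way as ``DATA`` above)::

    $ docker run -t -i -rm -volumes-from DATA -volumes-from DATA2 -name client3 ubuntu bash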

Interestingly, you can mount the volumes that came from the ``DATA`` container in
yet another container via the ``client1`` middleman container::

    $ docker run -t -i -rm -volumes-from client1 -name client2 ubuntu bash

This allows you to abstract the actual data source from users of that data,
similar to :ref:`ambassador_pattern_linking <ambassador_pattern_linking>`.

If you remove containers that mount volumes, including the initial DATA container,
or the middleman, the volumes will not be deleted until there are no containers still
referencing those volumes. This allows you to upgrade, or effectively migrate data volumes
between containers.

Mount a Host Directory as a Container Volume:
---------------------------------------------

@@ -68,13 +91,13 @@ Mount a Host Directory as a Container Volume:

    -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro].
    If "host-dir" is missing, then docker creates a new volume.

This is not available for a Dockerfile due the portability and sharing
purpose of it. The [host-dir] volumes is something 100% host dependent
and will break on any other machine.
This is not available from a Dockerfile as it makes the built image less portable
or shareable. [host-dir] volumes are 100% host dependent and will break on any
other machine.

For example::

    sudo docker run -v /var/logs:/var/host_logs:ro shykes/couchdb:2013-05-03
    sudo docker run -v /var/logs:/var/host_logs:ro ubuntu bash

The command above mounts the host directory ``/var/logs`` into the
container with read only permissions as ``/var/host_logs``.

@@ -87,3 +110,6 @@ Known Issues

* :issue:`2702`: "lxc-start: Permission denied - failed to mount"
  could indicate a permissions problem with AppArmor. Please see the
  issue for a workaround.
* :issue:`2528`: the busybox container is used to make the resulting container as small and
  simple as possible - whenever you need to interact with the data in the volume
  you mount it into another container.
26 docs/theme/docker/layout.html (vendored)
@@ -86,26 +86,26 @@

    </div>
    </div>

    <div class="container">
    <div class="container-fluid">

    <!-- Docs nav
    ================================================== -->
    <div class="row main-row">
    <div class="row-fluid main-row">

    <div class="span3 sidebar bs-docs-sidebar">
    <div class="sidebar bs-docs-sidebar">
    <div class="page-title" >
    <h4>DOCUMENTATION</h4>
    </div>

    {{ toctree(collapse=False, maxdepth=3) }}
    <form>
    <input type="text" id="st-search-input" class="st-search-input span3" style="width:160px;" />
    <input type="text" id="st-search-input" class="st-search-input span3" placeholder="search in documentation" style="width:210px;" />
    <div id="st-results-container"></div>
    </form>
    </div>

    <!-- body block -->
    <div class="span9 main-content">
    <div class="main-content">

    <!-- Main section
    ================================================== -->

@@ -134,13 +134,22 @@

    </div>

    <div class="social links">
    <a class="twitter" href="http://twitter.com/docker">Twitter</a>
    <a class="github" href="https://github.com/dotcloud/docker/">GitHub</a>
    <a title="Docker on Twitter" class="twitter" href="http://twitter.com/docker">Twitter</a>
    <a title="Docker on GitHub" class="github" href="https://github.com/dotcloud/docker/">GitHub</a>
    <a title="Docker on Reddit" class="reddit" href="http://www.reddit.com/r/Docker/">Reddit</a>
    <a title="Docker on Google+" class="googleplus" href="https://plus.google.com/u/0/b/100381662757235514581/communities/108146856671494713993">Google+</a>
    <a title="Docker on Facebook" class="facebook" href="https://www.facebook.com/docker.run">Facebook</a>
    <a title="Docker on SlideShare" class="slideshare" href="http://www.slideshare.net/dotCloud">Slideshare</a>
    <a title="Docker on Youtube" class="youtube" href="http://www.youtube.com/user/dockerrun/">Youtube</a>
    <a title="Docker on Flickr" class="flickr" href="http://www.flickr.com/photos/99741659@N08/">Flickr</a>
    <a title="Docker on LinkedIn" class="linkedin" href="http://www.linkedin.com/company/dotcloud">LinkedIn</a>
    </div>

    <div class="tbox version-flyer ">
    <div class="content">
    <small>Current version:</small>
    <p class="version-note">Note: You are currently browsing the development documentation. The current release may work differently.</p>

    <small>Available versions:</small>
    <ul class="inline">
    {% for slug, url in versions %}
    <li class="alternative"><a href="{{ url }}{%- for word in pagename.split('/') -%}

@@ -163,6 +172,7 @@

    </div>
    <!-- end of footer -->
    </div>

    </div>

76 docs/theme/docker/static/css/main.css (vendored)
@@ -62,9 +62,12 @@ p a.btn {

    -moz-box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065);
    box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065);
    }
    .brand.logo a {
    .brand-logo a {
    color: white;
    }
    .brand-logo a img {
    width: auto;
    }
    .inline-icon {
    margin-bottom: 6px;
    }

@@ -186,8 +189,15 @@ body {

    .main-row {
    margin-top: 40px;
    }
    .sidebar {
    width: 215px;
    float: left;
    }
    .main-content {
    padding: 16px 18px inherit;
    margin-left: 230px;
    /* space for sidebar */

    }
    /* =======================
    Social footer

@@ -198,20 +208,54 @@ body {

    }
    .social .twitter,
    .social .github,
    .social .googleplus {
    background: url("https://www.docker.io/static/img/footer-links.png") no-repeat transparent;
    .social .googleplus,
    .social .facebook,
    .social .slideshare,
    .social .linkedin,
    .social .flickr,
    .social .youtube,
    .social .reddit {
    background: url("../img/social/docker_social_logos.png") no-repeat transparent;
    display: inline-block;
    height: 35px;
    height: 32px;
    overflow: hidden;
    text-indent: 9999px;
    width: 35px;
    margin-right: 10px;
    width: 32px;
    margin-right: 5px;
    }
    .social :hover {
    -webkit-transform: rotate(-10deg);
    -moz-transform: rotate(-10deg);
    -o-transform: rotate(-10deg);
    -ms-transform: rotate(-10deg);
    transform: rotate(-10deg);
    }
    .social .twitter {
    background-position: 0px 2px;
    background-position: -160px 0px;
    }
    .social .reddit {
    background-position: -256px 0px;
    }
    .social .github {
    background-position: -59px 2px;
    background-position: -64px 0px;
    }
    .social .googleplus {
    background-position: -96px 0px;
    }
    .social .facebook {
    background-position: 0px 0px;
    }
    .social .slideshare {
    background-position: -128px 0px;
    }
    .social .youtube {
    background-position: -192px 0px;
    }
    .social .flickr {
    background-position: -32px 0px;
    }
    .social .linkedin {
    background-position: -224px 0px;
    }
    form table th {
    vertical-align: top;

@@ -342,6 +386,7 @@ div.alert.alert-block {

    border: 1px solid #88BABC;
    padding: 5px;
    font-size: larger;
    max-width: 300px;
    }
    .version-flyer .content {
    padding-right: 45px;

@@ -351,18 +396,18 @@ div.alert.alert-block {

    background-position: right center;
    background-repeat: no-repeat;
    }
    .version-flyer .alternative {
    visibility: hidden;
    display: none;
    }
    .version-flyer .active-slug {
    visibility: visible;
    display: inline-block;
    font-weight: bolder;
    }
    .version-flyer:hover .alternative {
    animation-duration: 1s;
    display: inline-block;
    visibility: visible;
    }
    .version-flyer .version-note {
    font-size: 16px;
    color: black;
    }
    /* =====================================
    Styles for

@@ -410,7 +455,7 @@ dt:hover > a.headerlink {

    .admonition.seealso {
    border-color: #23cb1f;
    }

    /* Add styles for other types of comments */
    .versionchanged,
    .versionadded,
    .versionmodified,

@@ -418,15 +463,12 @@ dt:hover > a.headerlink {

    font-size: larger;
    font-weight: bold;
    }

    .versionchanged {
    color: lightseagreen;
    }

    .versionadded {
    color: mediumblue;
    }

    .deprecated {
    color: orangered;
    }

107 docs/theme/docker/static/css/main.less (vendored)
@@ -98,7 +98,6 @@ p a {

    }

    .navbar .brand {
    margin-left: 0px;
    float: left;

@@ -126,9 +125,11 @@ p a {

    box-shadow: 0 1px 4px rgba(0, 0, 0, 0.065);
    }

    .brand.logo a {
    .brand-logo a {
    color: white;

    img {
    width: auto;
    }
    }

    .logo {

@@ -317,10 +318,18 @@ body {

    margin-top: 40px;
    }

    .sidebar {
    width: 215px;
    float: left;
    }

    .main-content {
    padding: 16px 18px inherit;
    margin-left: 230px; /* space for sidebar */
    }

    /* =======================
    Social footer
    ======================= */

@@ -330,24 +339,64 @@ body {

    margin-top: 15px;
    }

    .social .twitter, .social .github, .social .googleplus {
    background: url("https://www.docker.io/static/img/footer-links.png") no-repeat transparent;
    display: inline-block;
    height: 35px;
    overflow: hidden;
    text-indent: 9999px;
    width: 35px;
    margin-right: 10px;
    .social {
    .twitter, .github, .googleplus, .facebook, .slideshare, .linkedin, .flickr, .youtube, .reddit {
    background: url("../img/social/docker_social_logos.png") no-repeat transparent;
    display: inline-block;
    height: 32px;
    overflow: hidden;
    text-indent: 9999px;
    width: 32px;
    margin-right: 5px;
    }
    }

    .social :hover {
    -webkit-transform: rotate(-10deg);
    -moz-transform: rotate(-10deg);
    -o-transform: rotate(-10deg);
    -ms-transform: rotate(-10deg);
    transform: rotate(-10deg);
    }

    .social .twitter {
    background-position: 0px 2px;
    background-position: -160px 0px;
    }

    .social .reddit {
    background-position: -256px 0px;
    }

    .social .github {
    background-position: -59px 2px;
    background-position: -64px 0px;
    }

    .social .googleplus {
    background-position: -96px 0px;
    }

    .social .facebook {
    background-position: -0px 0px;
    }

    .social .slideshare {
    background-position: -128px 0px;
    }

    .social .youtube {
    background-position: -192px 0px;
    }

    .social .flickr {
    background-position: -32px 0px;
    }

    .social .linkedin {
    background-position: -224px 0px;
    }

    // Styles on the forms
    // ----------------------------------

@@ -528,31 +577,34 @@ div.alert.alert-block {

    border: 1px solid #88BABC;
    padding: 5px;
    font-size: larger;
    max-width: 300px;

    .content {
    padding-right: 45px;
    margin-top: 7px;
    margin-left: 7px;
    // display: inline-block;
    background-image: url('../img/container3.png');
    background-position: right center;
    background-repeat: no-repeat;
    }

    .alternative {
    visibility: hidden;
    display: none;
    }

    .active-slug {
    visibility: visible;
    display: inline-block;
    font-weight: bolder;
    }

    &:hover .alternative {
    animation-duration: 1s;
    display: inline-block;
    visibility: visible;
    }

    .version-note {
    font-size: 16px;
    color: black;
    }

    }

@@ -612,3 +664,24 @@ dt:hover > a.headerlink {

    }

    /* Add styles for other types of comments */

    .versionchanged,
    .versionadded,
    .versionmodified,
    .deprecated {
    font-size: larger;
    font-weight: bold;
    }

    .versionchanged {
    color: lightseagreen;
    }

    .versionadded {
    color: mediumblue;
    }

    .deprecated {
    color: orangered;
    }

BIN docs/theme/docker/static/img/footer-links.png (vendored) -- binary file not shown. Before: 2.1 KiB
BIN docs/theme/docker/static/img/social/docker_social_logos.png (vendored, new file) -- binary file not shown. After: 3.1 KiB
22 docs/theme/docker/static/js/docs.js (vendored)
@@ -53,14 +53,6 @@ $(function(){

    }
    }

    if (doc_version == "") {
    $('.version-flyer ul').html('<li class="alternative active-slug"><a href="" title="Switch to local">Local</a></li>');
    }

    // mark the active documentation in the version widget
    $(".version-flyer a:contains('" + doc_version + "')").parent().addClass('active-slug');

    // attached handler on click
    // Do not attach to first element or last (intro, faq) so that
    // first and last link directly instead of accordion

@@ -95,4 +87,18 @@ $(function(){

    // add class to all those which have children
    $('.sidebar > ul > li').not(':last').not(':first').addClass('has-children');

    if (doc_version == "") {
    $('.version-flyer ul').html('<li class="alternative active-slug"><a href="" title="Switch to local">Local</a></li>');
    }

    if (doc_version == "latest") {
    $('.version-flyer .version-note').hide();
    }

    // mark the active documentation in the version widget
    // (use jQuery's .attr(), since jQuery objects have no DOM setAttribute method)
    $(".version-flyer a:contains('" + doc_version + "')").parent().addClass('active-slug').attr("title", "Current version");

    });
@@ -6,6 +6,7 @@ import (

    "io"
    "log"
    "os"
    "path/filepath"
    "runtime"
    "strings"
    )

@@ -79,9 +80,24 @@ func New(root string) (*Engine, error) {

    }
    }
    }

    if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) {
    return nil, err
    }

    // Docker makes some assumptions about the "absoluteness" of root
    // ... so let's make sure it has no symlinks
    if p, err := filepath.Abs(root); err != nil {
    log.Fatalf("Unable to get absolute root (%s): %s", root, err)
    } else {
    root = p
    }
    if p, err := filepath.EvalSymlinks(root); err != nil {
    log.Fatalf("Unable to canonicalize root (%s): %s", root, err)
    } else {
    root = p
    }

    eng := &Engine{
    root:     root,
    handlers: make(map[string]Handler),

@@ -18,7 +18,7 @@ func TestRegister(t *testing.T) {

    eng := newTestEngine(t)

    //Should fail because globan handlers are copied
    //Should fail because global handlers are copied
    //at the engine creation
    if err := eng.Register("dummy1", nil); err == nil {
    t.Fatalf("Expecting error, got none")

@@ -1,8 +1,8 @@

    package engine

    import (
    "path"
    "net/http"
    "path"
    )

    // ServeHTTP executes a job as specified by the http request `r`, and sends the

@@ -22,7 +22,7 @@ func (eng *Engine) ServeHTTP(w http.ResponseWriter, r *http.Request) {

    jobArgs = []string{}
    }
    w.Header().Set("Job-Name", jobName)
    for _, arg := range(jobArgs) {
    for _, arg := range jobArgs {
    w.Header().Add("Job-Args", arg)
    }
    job := eng.Job(jobName, jobArgs...)
graph.go: 15 lines changed
@@ -10,6 +10,7 @@ import (
    "os"
    "path"
    "path/filepath"
    "runtime"
    "strings"
    "syscall"
    "time"
@@ -56,6 +57,7 @@ func (graph *Graph) restore() error {
            graph.idIndex.Add(id)
        }
    }
    utils.Debugf("Restored %d elements", len(dir))
    return nil
}

@@ -85,17 +87,17 @@ func (graph *Graph) Get(name string) (*Image, error) {
    if err != nil {
        return nil, err
    }
    // Check that the filesystem layer exists
    rootfs, err := graph.driver.Get(img.ID)
    if err != nil {
        return nil, fmt.Errorf("Driver %s failed to get image rootfs %s: %s", graph.driver, img.ID, err)
    }
    if img.ID != id {
        return nil, fmt.Errorf("Image stored at '%s' has wrong id '%s'", id, img.ID)
    }
    img.graph = graph

    if img.Size < 0 {
        rootfs, err := graph.driver.Get(img.ID)
        if err != nil {
            return nil, fmt.Errorf("Driver %s failed to get image rootfs %s: %s", graph.driver, img.ID, err)
        }

        var size int64
        if img.Parent == "" {
            if size, err = utils.TreeSize(rootfs); err != nil {
@@ -130,7 +132,8 @@ func (graph *Graph) Create(layerData archive.Archive, container *Container, comm
        DockerVersion: VERSION,
        Author:        author,
        Config:        config,
        Architecture:  "x86_64",
        Architecture:  runtime.GOARCH,
        OS:            runtime.GOOS,
    }
    if container != nil {
        img.Parent = container.Image
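The Architecture/OS change above replaces a hard-coded "x86_64" with the values Go already knows at build time. A hedged, self-contained sketch of the resulting metadata (the ImageMeta type here is illustrative, not Docker's actual struct):

```go
package main

import (
	"encoding/json"
	"fmt"
	"runtime"
)

// ImageMeta stands in for the architecture/os fields added to Image.
type ImageMeta struct {
	Architecture string `json:"architecture,omitempty"`
	OS           string `json:"os,omitempty"`
}

func main() {
	meta := ImageMeta{
		Architecture: runtime.GOARCH, // e.g. "amd64", not the uname-style "x86_64"
		OS:           runtime.GOOS,   // e.g. "linux"
	}
	b, _ := json.Marshal(meta)
	fmt.Println(string(b))
}
```

Note that runtime.GOARCH reports "amd64" rather than the uname-style "x86_64", which is why the release tooling later in this diff maps between the two conventions.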
@@ -25,12 +25,12 @@ import (
    "fmt"
    "github.com/dotcloud/docker/archive"
    "github.com/dotcloud/docker/graphdriver"
    mountpk "github.com/dotcloud/docker/mount"
    "github.com/dotcloud/docker/utils"
    "os"
    "os/exec"
    "path"
    "strings"
    "syscall"
)

func init() {
@@ -296,7 +296,7 @@ func (a *Driver) unmount(id string) error {

func (a *Driver) mounted(id string) (bool, error) {
    target := path.Join(a.rootPath(), "mnt", id)
    return Mounted(target)
    return mountpk.Mounted(target)
}

// During cleanup aufs needs to unmount all mountpoints
@@ -327,7 +327,7 @@ func (a *Driver) aufsMount(ro []string, rw, target string) (err error) {

    for _, layer := range ro {
        branch := fmt.Sprintf("append:%s=ro+wh", layer)
        if err = mount("none", target, "aufs", syscall.MS_REMOUNT, branch); err != nil {
        if err = mount("none", target, "aufs", MsRemount, branch); err != nil {
            return
        }
    }
@@ -2,9 +2,7 @@ package aufs

import (
    "github.com/dotcloud/docker/utils"
    "os"
    "os/exec"
    "path/filepath"
    "syscall"
)

@@ -17,21 +15,3 @@ func Unmount(target string) error {
    }
    return nil
}

func Mounted(mountpoint string) (bool, error) {
    mntpoint, err := os.Stat(mountpoint)
    if err != nil {
        if os.IsNotExist(err) {
            return false, nil
        }
        return false, err
    }
    parent, err := os.Stat(filepath.Join(mountpoint, ".."))
    if err != nil {
        return false, err
    }
    mntpointSt := mntpoint.Sys().(*syscall.Stat_t)
    parentSt := parent.Sys().(*syscall.Stat_t)

    return mntpointSt.Dev != parentSt.Dev, nil
}
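Why the removed Mounted() works (it moves behind the mountpk import seen earlier): a directory that is a mountpoint lives on a different device than its parent, so comparing st_dev across the boundary detects the mount. A standalone probe along the same lines (Linux/Unix only; /proc is just a convenient mountpoint to test against):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"syscall"
)

func mounted(mountpoint string) (bool, error) {
	st, err := os.Stat(mountpoint)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}
	parent, err := os.Stat(filepath.Join(mountpoint, ".."))
	if err != nil {
		return false, err
	}
	// Crossing a mount boundary changes the underlying device ID.
	return st.Sys().(*syscall.Stat_t).Dev != parent.Sys().(*syscall.Stat_t).Dev, nil
}

func main() {
	ok, err := mounted("/proc")
	fmt.Println(ok, err)
}
```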
@@ -2,6 +2,8 @@ package aufs

import "errors"

const MsRemount = 0

func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
    return errors.New("mount is not implemented on darwin")
}

@@ -2,6 +2,8 @@ package aufs

import "syscall"

const MsRemount = syscall.MS_REMOUNT

func mount(source string, target string, fstype string, flags uintptr, data string) error {
    return syscall.Mount(source, target, fstype, flags, data)
}
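These two hunks rely on Go's per-platform source files (presumably mount_darwin.go and mount_linux.go; the file names are not visible in this diff) so that MsRemount and mount() exist on every OS and shared callers still compile. A runnable mirror of the darwin variant:

```go
package main

import (
	"errors"
	"fmt"
)

// On platforms without aufs, the constant is a harmless zero and mount()
// always fails, but callers still type-check everywhere.
const MsRemount = 0

func mount(source, target, fstype string, flags uintptr, data string) error {
	return errors.New("mount is not implemented on this platform")
}

func main() {
	fmt.Println(mount("none", "/mnt", "aufs", MsRemount, ""))
}
```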
@@ -154,7 +154,7 @@ func (devices *DeviceSet) allocateTransactionId() uint64 {
func (devices *DeviceSet) saveMetadata() error {
    jsonData, err := json.Marshal(devices.MetaData)
    if err != nil {
        return fmt.Errorf("Error encoding metaadata to json: %s", err)
        return fmt.Errorf("Error encoding metadata to json: %s", err)
    }
    tmpFile, err := ioutil.TempFile(filepath.Dir(devices.jsonFile()), ".json")
    if err != nil {
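The hunk is cut off, but the ioutil.TempFile call in the same directory as the target suggests the usual write-to-temp-then-rename pattern for crash-safe metadata. A hedged standalone sketch of that pattern, not Docker's exact code (os.CreateTemp is the modern spelling of ioutil.TempFile; the path is illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

func saveJSON(path string, v interface{}) error {
	data, err := json.Marshal(v)
	if err != nil {
		return fmt.Errorf("Error encoding metadata to json: %s", err)
	}
	// Write to a temp file in the same directory so the final rename
	// stays on one filesystem and is atomic on POSIX systems.
	tmp, err := os.CreateTemp(filepath.Dir(path), ".json")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // no-op after a successful rename
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path)
}

func main() {
	fmt.Println(saveJSON("/tmp/devices.json", map[string]int{"open_transaction_id": 1}))
}
```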
@@ -36,9 +36,8 @@ func (d *Driver) Cleanup() error {
}

func copyDir(src, dst string) error {
    cmd := exec.Command("cp", "-aT", "--reflink=auto", src, dst)
    if err := cmd.Run(); err != nil {
        return err
    if output, err := exec.Command("cp", "-aT", "--reflink=auto", src, dst).CombinedOutput(); err != nil {
        return fmt.Errorf("Error VFS copying directory: %s (%s)", err, output)
    }
    return nil
}
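The point of the change above: cmd.Run() throws away cp's output, while CombinedOutput() captures stdout and stderr so the returned error says what actually went wrong. A minimal demonstration (the source path is deliberately bogus so cp fails and its own message lands in the error):

```go
package main

import (
	"fmt"
	"os/exec"
)

func copyDir(src, dst string) error {
	// CombinedOutput runs the command and returns stdout+stderr together.
	if output, err := exec.Command("cp", "-aT", "--reflink=auto", src, dst).CombinedOutput(); err != nil {
		return fmt.Errorf("Error VFS copying directory: %s (%s)", err, output)
	}
	return nil
}

func main() {
	fmt.Println(copyDir("/nonexistent-src", "/tmp/dst"))
}
```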
@@ -36,8 +36,9 @@ To build docker, you will need the following system dependencies

* An amd64 machine
* A recent version of git and mercurial
* Go version 1.2 or later (see notes below regarding using Go 1.1.2 and dynbinary)
* Go version 1.2 or later
* SQLite version 3.7.9 or later
* libdevmapper from lvm2 version 1.02.77 or later (http://www.sourceware.org/lvm2/)
* A clean checkout of the source must be added to a valid Go [workspace](http://golang.org/doc/code.html#Workspaces)
  under the path *src/github.com/dotcloud/docker*.

@@ -91,8 +92,7 @@ You would do the users of your distro a disservice and "void the docker warranty"
A good comparison is Busybox: all distros package it as a statically linked binary, because it just
makes sense. Docker is the same way.

If you *must* have a non-static Docker binary, or require Go 1.1.2 (since Go 1.2 is still freshly released
at the time of this writing), please use:
If you *must* have a non-static Docker binary, please use:

```bash
./hack/make.sh dynbinary
```
@@ -136,7 +136,7 @@ sudo('echo -e "deb http://archive.ubuntu.com/ubuntu raring main universe\n'
sudo('DEBIAN_FRONTEND=noninteractive apt-get install -q -y wget python-dev'
     ' python-pip supervisor git mercurial linux-image-extra-$(uname -r)'
     ' aufs-tools make libfontconfig libevent-dev libsqlite3-dev libssl-dev')
sudo('wget -O - https://go.googlecode.com/files/go1.1.2.linux-amd64.tar.gz | '
sudo('wget -O - https://go.googlecode.com/files/go1.2.linux-amd64.tar.gz | '
     'tar -v -C /usr/local -xz; ln -s /usr/local/go/bin/go /usr/bin/go')
sudo('GOPATH=/go go get -d github.com/dotcloud/docker')
sudo('pip install -r {}/requirements.txt'.format(CFG_PATH))
@@ -116,7 +116,7 @@ case "$lsb_dist" in
        (
            set -x
            $sh_c 'docker run busybox echo "Docker has been successfully installed!"'
        )
        ) || true
    fi
    exit 0
    ;;
hack/make.sh: 19 lines changed
@@ -15,8 +15,9 @@ set -e
# - The script is intended to be run inside the docker container specified
#   in the Dockerfile at the root of the source. In other words:
#   DO NOT CALL THIS SCRIPT DIRECTLY.
# - The right way to call this script is to invoke "docker build ." from
#   your checkout of the Docker repository, and then
# - The right way to call this script is to invoke "make" from
#   your checkout of the Docker repository.
#   the Makefile will do a "docker build -t docker ." and then
#   "docker run hack/make.sh" in the resulting container image.
#

@@ -28,7 +29,7 @@ RESOLVCONF=$(readlink --canonicalize /etc/resolv.conf)
grep -q "$RESOLVCONF" /proc/mounts || {
    echo >&2 "# WARNING! I don't seem to be running in a docker container."
    echo >&2 "# The result of this command might be an incorrect build, and will not be officially supported."
    echo >&2 "# Try this: 'docker build -t docker . && docker run docker ./hack/make.sh'"
    echo >&2 "# Try this: 'make all'"
}

# List of bundles to create when no argument is passed
@@ -40,6 +41,7 @@ DEFAULT_BUNDLES=(
    dyntest
    dyntest-integration
    cover
    cross
    tgz
    ubuntu
)
@@ -63,10 +65,13 @@ fi
# Use these flags when compiling the tests and final binary
LDFLAGS='-X main.GITCOMMIT "'$GITCOMMIT'" -X main.VERSION "'$VERSION'" -w'
LDFLAGS_STATIC='-X github.com/dotcloud/docker/utils.IAMSTATIC true -linkmode external -extldflags "-lpthread -static -Wl,--unresolved-symbols=ignore-in-object-files"'
BUILDFLAGS='-tags netgo'
BUILDFLAGS='-tags netgo -a'

HAVE_GO_TEST_COVER=
if go help testflag | grep -q -- -cover; then
if \
    go help testflag | grep -- -cover > /dev/null \
    && go tool -n cover > /dev/null 2>&1 \
; then
    HAVE_GO_TEST_COVER=1
fi

@@ -85,10 +90,6 @@ go_test_dir() {
        coverprofile="$DEST/coverprofiles/${coverprofile//\//-}"
        testcover=( -cover -coverprofile "$coverprofile" )
    fi
    ( # we run "go test -i" outside the "set -x" to provide cleaner output
        cd "$dir"
        go test -i -ldflags "$LDFLAGS" $BUILDFLAGS
    )
    (
        set -x
        cd "$dir"
hack/make/cross (new file): 23 lines
@@ -0,0 +1,23 @@
#!/bin/bash

DEST=$1

# if we have our linux/amd64 version compiled, let's symlink it in
if [ -x "$DEST/../binary/docker-$VERSION" ]; then
    mkdir -p "$DEST/linux/amd64"
    (
        cd "$DEST/linux/amd64"
        ln -s ../../../binary/* ./
    )
    echo "Created symlinks:" "$DEST/linux/amd64/"*
fi

for platform in $DOCKER_CROSSPLATFORMS; do
    (
        mkdir -p "$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION
        export GOOS=${platform%/*}
        export GOARCH=${platform##*/}
        export LDFLAGS_STATIC="" # we just need a simple client for these platforms (TODO this might change someday)
        source "$(dirname "$BASH_SOURCE")/binary" "$DEST/$platform"
    )
done
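When exercising the DOCKER_CROSSPLATFORMS loop above, a trivial probe binary is handy for confirming what a cross-built artifact was actually compiled for; building it with the same GOOS/GOARCH exports mirrors the loop. A hedged sketch:

```go
// Build with, e.g.: GOOS=darwin GOARCH=amd64 go build -o probe .
// then run the resulting binary on the target platform.
package main

import (
	"fmt"
	"runtime"
)

func main() {
	fmt.Printf("built for %s/%s\n", runtime.GOOS, runtime.GOARCH)
}
```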
@@ -3,7 +3,7 @@
DEST=$1

# dockerinit still needs to be a static binary, even if docker is dynamic
CGO_ENABLED=0 go build -a -o $DEST/dockerinit-$VERSION -ldflags "$LDFLAGS -d" $BUILDFLAGS ./dockerinit
CGO_ENABLED=0 go build -o $DEST/dockerinit-$VERSION -ldflags "$LDFLAGS -d" $BUILDFLAGS ./dockerinit
echo "Created binary: $DEST/dockerinit-$VERSION"
ln -sf dockerinit-$VERSION $DEST/dockerinit

@@ -12,6 +12,6 @@ export DOCKER_INITSHA1="$(sha1sum $DEST/dockerinit-$VERSION | cut -d' ' -f1)"
# exported so that "dyntest" can easily access it later without recalculating it

(
    export LDFLAGS_STATIC="-X github.com/dotcloud/docker/utils.INITSHA1 \"$DOCKER_INITSHA1\""
    export LDFLAGS_STATIC="-X github.com/dotcloud/docker/utils.INITSHA1 \"$DOCKER_INITSHA1\" -X github.com/dotcloud/docker/utils.INITPATH \"$DOCKER_INITPATH\""
    source "$(dirname "$BASH_SOURCE")/binary"
)
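The LDFLAGS_STATIC change above uses the linker's -X flag to inject INITSHA1 (and now INITPATH) into package-level string variables at link time. A hedged minimal pair showing the mechanism with an illustrative variable of my own, not Docker's:

```go
package main

import "fmt"

// Left empty here; the linker fills it in. The diff uses the old
// space-separated form, -X main.INITSHA1 deadbeef; modern Go spells it
// -ldflags '-X main.INITSHA1=deadbeef'.
var INITSHA1 string

func main() {
	fmt.Println("dockerinit sha1:", INITSHA1)
}
```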
@@ -1,23 +1,29 @@
#!/bin/bash

DEST="$1"
BINARY="$DEST/../binary/docker-$VERSION"
TGZ="$DEST/docker-$VERSION.tgz"
CROSS="$DEST/../cross"

set -e

if [ ! -x "$BINARY" ]; then
    echo >&2 'error: binary must be run before tgz'
if [ ! -d "$CROSS/linux/amd64" ]; then
    echo >&2 'error: binary and cross must be run before tgz'
    false
fi

mkdir -p "$DEST/build"

mkdir -p "$DEST/build/usr/local/bin"
cp -L "$BINARY" "$DEST/build/usr/local/bin/docker"

tar --numeric-owner --owner 0 -C "$DEST/build" -czf "$TGZ" usr

rm -rf "$DEST/build"

echo "Created tgz: $TGZ"
for d in "$CROSS/"*/*; do
    GOARCH="$(basename "$d")"
    GOOS="$(basename "$(dirname "$d")")"
    mkdir -p "$DEST/$GOOS/$GOARCH"
    TGZ="$DEST/$GOOS/$GOARCH/docker-$VERSION.tgz"

    mkdir -p "$DEST/build"

    mkdir -p "$DEST/build/usr/local/bin"
    cp -L "$d/docker-$VERSION" "$DEST/build/usr/local/bin/docker"

    tar --numeric-owner --owner 0 -C "$DEST/build" -czf "$TGZ" usr

    rm -rf "$DEST/build"

    echo "Created tgz: $TGZ"
done
@@ -17,6 +17,7 @@ repeatability across servers.
Docker is a great building block for automating distributed systems:
large-scale web deployments, database clusters, continuous deployment systems,
private PaaS, service-oriented architectures, etc."
PACKAGE_LICENSE="Apache-2.0"

# Build docker as an ubuntu package using FPM and REPREPRO (sue me).
# bundle_binary must be called first.
@@ -30,6 +31,20 @@ bundle_ubuntu() {
    mkdir -p $DIR/lib/systemd
    cp -R contrib/init/systemd $DIR/lib/systemd/system

    mkdir -p $DIR/etc/default
    cat > $DIR/etc/default/docker <<'EOF'
# Docker Upstart and SysVinit configuration file

# Customize location of Docker binary (especially for development testing).
#DOCKER="/usr/local/bin/docker"

# Use DOCKER_OPTS to modify the daemon startup options.
#DOCKER_OPTS="-dns 8.8.8.8"

# If you need Docker to use an HTTP proxy, it can also be specified here.
#export http_proxy=http://127.0.0.1:3128/
EOF

    # Copy the binary
    # This will fail if the binary bundle hasn't been built
    mkdir -p $DIR/usr/bin
@@ -104,7 +119,7 @@ EOF
        --replaces lxc-docker \
        --replaces lxc-docker-virtual-package \
        --url "$PACKAGE_URL" \
        --vendor "$PACKAGE_VENDOR" \
        --license "$PACKAGE_LICENSE" \
        --config-files /etc/init/docker.conf \
        --config-files /etc/init.d/docker \
        --config-files /etc/default/docker \
@@ -118,13 +133,9 @@ EOF
        --description "$PACKAGE_DESCRIPTION" \
        --maintainer "$PACKAGE_MAINTAINER" \
        --url "$PACKAGE_URL" \
        --vendor "$PACKAGE_VENDOR" \
        --config-files /etc/init/docker.conf \
        --config-files /etc/init.d/docker \
        --config-files /etc/default/docker \
        --license "$PACKAGE_LICENSE" \
        --deb-compression xz \
        -t deb .
    # note: the --config-files lines have to be duplicated to stop overwrite on package upgrade (since we have to use this funky virtual package)
    )
}
hack/release.sh: 112 lines changed
@@ -47,6 +47,7 @@ cd /go/src/github.com/dotcloud/docker

RELEASE_BUNDLES=(
    binary
    cross
    tgz
    ubuntu
)
@@ -113,6 +114,77 @@ s3_url() {
    esac
}

release_build() {
    GOOS=$1
    GOARCH=$2

    BINARY=bundles/$VERSION/cross/$GOOS/$GOARCH/docker-$VERSION
    TGZ=bundles/$VERSION/tgz/$GOOS/$GOARCH/docker-$VERSION.tgz

    # we need to map our GOOS and GOARCH to uname values
    # see https://en.wikipedia.org/wiki/Uname
    # ie, GOOS=linux -> "uname -s"=Linux

    S3OS=$GOOS
    case "$S3OS" in
        darwin)
            S3OS=Darwin
            ;;
        freebsd)
            S3OS=FreeBSD
            ;;
        linux)
            S3OS=Linux
            ;;
        *)
            echo >&2 "error: can't convert $S3OS to an appropriate value for 'uname -s'"
            exit 1
            ;;
    esac

    S3ARCH=$GOARCH
    case "$S3ARCH" in
        amd64)
            S3ARCH=x86_64
            ;;
        386)
            S3ARCH=i386
            ;;
        arm)
            # GOARCH is fine
            ;;
        *)
            echo >&2 "error: can't convert $S3ARCH to an appropriate value for 'uname -m'"
            exit 1
            ;;
    esac

    S3DIR=s3://$BUCKET/builds/$S3OS/$S3ARCH

    if [ ! -x "$BINARY" ]; then
        echo >&2 "error: can't find $BINARY - was it compiled properly?"
        exit 1
    fi
    if [ ! -f "$TGZ" ]; then
        echo >&2 "error: can't find $TGZ - was it packaged properly?"
        exit 1
    fi

    echo "Uploading $BINARY to $S3OS/$S3ARCH/docker-$VERSION"
    s3cmd --follow-symlinks --preserve --acl-public put $BINARY $S3DIR/docker-$VERSION

    echo "Uploading $TGZ to $S3OS/$S3ARCH/docker-$VERSION.tgz"
    s3cmd --follow-symlinks --preserve --acl-public put $TGZ $S3DIR/docker-$VERSION.tgz

    if [ -z "$NOLATEST" ]; then
        echo "Copying $S3OS/$S3ARCH/docker-$VERSION to $S3OS/$S3ARCH/docker-latest"
        s3cmd --acl-public cp $S3DIR/docker-$VERSION $S3DIR/docker-latest

        echo "Copying $S3OS/$S3ARCH/docker-$VERSION.tgz to $S3OS/$S3ARCH/docker-latest.tgz"
        s3cmd --acl-public cp $S3DIR/docker-$VERSION.tgz $S3DIR/docker-latest.tgz
    fi
}

# Upload the 'ubuntu' bundle to S3:
# 1. A full APT repository is published at $BUCKET/ubuntu/
# 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/index
@@ -189,31 +261,21 @@ EOF
    echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu"
}

# Upload a tgz to S3
release_tgz() {
    [ -e bundles/$VERSION/tgz/docker-$VERSION.tgz ] || {
        echo >&2 './hack/make.sh must be run before release_binary'
# Upload binaries and tgz files to S3
release_binaries() {
    [ -e bundles/$VERSION/cross/linux/amd64/docker-$VERSION ] || {
        echo >&2 './hack/make.sh must be run before release_binaries'
        exit 1
    }

    S3DIR=s3://$BUCKET/builds/Linux/x86_64
    s3cmd --acl-public put bundles/$VERSION/tgz/docker-$VERSION.tgz $S3DIR/docker-$VERSION.tgz
    for d in bundles/$VERSION/cross/*/*; do
        GOARCH="$(basename "$d")"
        GOOS="$(basename "$(dirname "$d")")"
        release_build "$GOOS" "$GOARCH"
    done

    if [ -z "$NOLATEST" ]; then
        echo "Copying docker-$VERSION.tgz to docker-latest.tgz"
        s3cmd --acl-public cp $S3DIR/docker-$VERSION.tgz $S3DIR/docker-latest.tgz
    fi
}
# TODO create redirect from builds/*/i686 to builds/*/i386

# Upload a static binary to S3
release_binary() {
    [ -e bundles/$VERSION/binary/docker-$VERSION ] || {
        echo >&2 './hack/make.sh must be run before release_binary'
        exit 1
    }

    S3DIR=s3://$BUCKET/builds/Linux/x86_64
    s3cmd --acl-public put bundles/$VERSION/binary/docker-$VERSION $S3DIR/docker-$VERSION
    cat <<EOF | write_to_s3 s3://$BUCKET/builds/index
# To install, run the following command as root:
curl -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker
@@ -226,8 +288,6 @@ EOF
    s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/builds/info

    if [ -z "$NOLATEST" ]; then
        echo "Copying docker-$VERSION to docker-latest"
        s3cmd --acl-public cp $S3DIR/docker-$VERSION $S3DIR/docker-latest
        echo "Advertising $VERSION on $BUCKET as most recent version"
        echo $VERSION | write_to_s3 s3://$BUCKET/latest
    fi
@@ -246,11 +306,15 @@ release_test() {

main() {
    setup_s3
    release_binary
    release_tgz
    release_binaries
    release_ubuntu
    release_index
    release_test
}

main

echo
echo
echo "Release complete; see $(s3_url)"
echo
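The GOOS/GOARCH-to-uname mapping in release_build is small but easy to get wrong; here is a hedged Go rendering of the same table, useful only for sanity-checking it in isolation:

```go
package main

import "fmt"

// unameS maps GOOS to the "uname -s" convention, as in release_build.
func unameS(goos string) (string, error) {
	switch goos {
	case "darwin":
		return "Darwin", nil
	case "freebsd":
		return "FreeBSD", nil
	case "linux":
		return "Linux", nil
	}
	return "", fmt.Errorf("can't convert %s to an appropriate value for 'uname -s'", goos)
}

// unameM maps GOARCH to the "uname -m" convention.
func unameM(goarch string) (string, error) {
	switch goarch {
	case "amd64":
		return "x86_64", nil
	case "386":
		return "i386", nil
	case "arm":
		return "arm", nil // GOARCH is fine as-is
	}
	return "", fmt.Errorf("can't convert %s to an appropriate value for 'uname -m'", goarch)
}

func main() {
	s, _ := unameS("linux")
	m, _ := unameM("amd64")
	fmt.Printf("s3://BUCKET/builds/%s/%s\n", s, m)
}
```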
hack/travis/dco.py (new executable file): 49 lines
@@ -0,0 +1,49 @@
#!/usr/bin/env python
import re
import subprocess
import yaml

from env import commit_range

commit_format = '-%n hash: "%h"%n author: %aN <%aE>%n message: |%n%w(0,2,2)%B'

gitlog = subprocess.check_output([
    'git', 'log', '--reverse',
    '--format=format:'+commit_format,
    '..'.join(commit_range), '--',
])

commits = yaml.load(gitlog)
if not commits:
    exit(0)  # what? how can we have no commits?

DCO = 'Docker-DCO-1.0-Signed-off-by:'

p = re.compile(r'^{0} ([^<]+) <([^<>@]+@[^<>]+)> \(github: (\S+)\)$'.format(re.escape(DCO)), re.MULTILINE|re.UNICODE)

failed_commits = 0

for commit in commits:
    commit['stat'] = subprocess.check_output([
        'git', 'log', '--format=format:', '--max-count=1',
        '--name-status', commit['hash'], '--',
    ])
    if commit['stat'] == '':
        print 'Commit {0} has no actual changed content, skipping.'.format(commit['hash'])
        continue

    m = p.search(commit['message'])
    if not m:
        print 'Commit {1} does not have a properly formatted "{0}" marker.'.format(DCO, commit['hash'])
        failed_commits += 1
        continue  # print ALL the commits that don't have a proper DCO

    (name, email, github) = m.groups()

    # TODO verify that "github" is the person who actually made this commit via the GitHub API

if failed_commits > 0:
    exit(failed_commits)

print 'All commits have a valid "{0}" marker.'.format(DCO)
exit(0)
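The heart of dco.py is the signed-off-by regex. A hedged Go transliteration of that one pattern, convenient for testing it outside Travis (the commit message here is a made-up example):

```go
package main

import (
	"fmt"
	"regexp"
)

const DCO = "Docker-DCO-1.0-Signed-off-by:"

func main() {
	// (?m) makes ^ and $ match per line, like Python's re.MULTILINE.
	p := regexp.MustCompile(`(?m)^` + regexp.QuoteMeta(DCO) +
		` ([^<]+) <([^<>@]+@[^<>]+)> \(github: (\S+)\)$`)
	msg := "Fix a bug\n\n" + DCO + " Jane Doe <jane@example.com> (github: janedoe)"
	if m := p.FindStringSubmatch(msg); m != nil {
		fmt.Printf("name=%q email=%q github=%q\n", m[1], m[2], m[3])
	} else {
		fmt.Println("no properly formatted DCO marker")
	}
}
```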
hack/travis/env.py (new file): 21 lines
@@ -0,0 +1,21 @@
import os
import subprocess

if 'TRAVIS' not in os.environ:
    print 'TRAVIS is not defined; this should run in TRAVIS. Sorry.'
    exit(127)

if os.environ['TRAVIS_PULL_REQUEST'] != 'false':
    commit_range = [os.environ['TRAVIS_BRANCH'], 'FETCH_HEAD']
else:
    try:
        subprocess.check_call([
            'git', 'log', '-1', '--format=format:',
            os.environ['TRAVIS_COMMIT_RANGE'], '--',
        ])
        commit_range = os.environ['TRAVIS_COMMIT_RANGE'].split('...')
        if len(commit_range) == 1:  # if it didn't split, it must have been separated by '..' instead
            commit_range = commit_range[0].split('..')
    except subprocess.CalledProcessError:
        print 'TRAVIS_COMMIT_RANGE is invalid. This seems to be a force push. We will just assume it must be against upstream master and compare all commits in between.'
        commit_range = ['upstream/master', 'HEAD']
hack/travis/gofmt.py (new executable file): 28 lines
@@ -0,0 +1,28 @@
#!/usr/bin/env python
import subprocess

from env import commit_range

files = subprocess.check_output([
    'git', 'diff', '--diff-filter=ACMR',
    '--name-only', '...'.join(commit_range), '--',
])

exit_status = 0

for filename in files.split('\n'):
    if filename.endswith('.go'):
        try:
            out = subprocess.check_output(['gofmt', '-s', '-l', filename])
            if out != '':
                print out,
                exit_status = 1
        except subprocess.CalledProcessError:
            exit_status = 1

if exit_status != 0:
    print 'Reformat the files listed above with "gofmt -s -w" and try again.'
    exit(exit_status)

print 'All files pass gofmt.'
exit(0)
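For running the same gate locally before pushing, here is a hedged Go equivalent of gofmt.py; HEAD~1 stands in for the Travis commit range and is an assumption, not part of the original script:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	// List files added/changed since the previous commit (assumed range).
	out, err := exec.Command("git", "diff", "--diff-filter=ACMR", "--name-only", "HEAD~1", "--").Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	status := 0
	for _, f := range strings.Split(strings.TrimSpace(string(out)), "\n") {
		if !strings.HasSuffix(f, ".go") {
			continue
		}
		// gofmt -s -l prints the filename only if it needs reformatting.
		bad, err := exec.Command("gofmt", "-s", "-l", f).Output()
		if err != nil || len(bad) > 0 {
			fmt.Print(string(bad))
			status = 1
		}
	}
	if status != 0 {
		fmt.Println(`Reformat the files listed above with "gofmt -s -w" and try again.`)
	}
	os.Exit(status)
}
```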
@@ -6,7 +6,7 @@ if [[ ! -d vendor ]]; then
fi
vendor_dir=${PWD}/vendor

git_clone () {
rm_pkg_dir () {
    PKG=$1
    REV=$2
    (
@@ -16,11 +16,31 @@ git_clone () {
        echo "src/$PKG already exists. Removing."
        rm -fr src/$PKG
    fi
    )
}

git_clone () {
    PKG=$1
    REV=$2
    (
        set -e
        rm_pkg_dir $PKG $REV
        cd $vendor_dir && git clone http://$PKG src/$PKG
        cd src/$PKG && git checkout -f $REV && rm -fr .git
    )
}

hg_clone () {
    PKG=$1
    REV=$2
    (
        set -e
        rm_pkg_dir $PKG $REV
        cd $vendor_dir && hg clone http://$PKG src/$PKG
        cd src/$PKG && hg checkout -r $REV && rm -fr .hg
    )
}

git_clone github.com/kr/pty 3b1f6487b

git_clone github.com/gorilla/context/ 708054d61e5
@@ -29,13 +49,6 @@ git_clone github.com/gorilla/mux/ 9b36453141c

git_clone github.com/syndtr/gocapability 3454319be2

# Docker requires code.google.com/p/go.net/websocket
PKG=code.google.com/p/go.net REV=84a4013f96e0
(
    set -e
    cd $vendor_dir
    if [[ ! -d src/$PKG ]]; then
        hg clone https://$PKG src/$PKG
    fi
    cd src/$PKG && hg checkout -r $REV
)
hg_clone code.google.com/p/go.net 84a4013f96e0

hg_clone code.google.com/p/gosqlite 74691fb6f837
image.go: 1 line changed
@@ -28,6 +28,7 @@ type Image struct {
    Author       string  `json:"author,omitempty"`
    Config       *Config `json:"config,omitempty"`
    Architecture string  `json:"architecture,omitempty"`
    OS           string  `json:"os,omitempty"`
    graph        *Graph
    Size         int64
}
@@ -432,7 +432,6 @@ func TestGetContainersChanges(t *testing.T) {
}

func TestGetContainersTop(t *testing.T) {
    t.Skip("Fixme. Skipping test for now. Reported error when testing using dind: 'api_test.go:527: Expected 2 processes, found 0.'")
    eng := NewTestEngine(t)
    defer mkRuntimeFromEngine(eng, t).Nuke()
    srv := mkServerFromEngine(eng, t)
@@ -475,7 +474,7 @@ func TestGetContainersTop(t *testing.T) {
    })

    r := httptest.NewRecorder()
    req, err := http.NewRequest("GET", "/"+containerID+"/top?ps_args=u", bytes.NewReader([]byte{}))
    req, err := http.NewRequest("GET", "/containers/"+containerID+"/top?ps_args=aux", nil)
    if err != nil {
        t.Fatal(err)
    }
@@ -498,11 +497,11 @@ func TestGetContainersTop(t *testing.T) {
    if len(procs.Processes) != 2 {
        t.Fatalf("Expected 2 processes, found %d.", len(procs.Processes))
    }
    if procs.Processes[0][10] != "/bin/sh" && procs.Processes[0][10] != "cat" {
        t.Fatalf("Expected `cat` or `/bin/sh`, found %s.", procs.Processes[0][10])
    if procs.Processes[0][10] != "/bin/sh -c cat" {
        t.Fatalf("Expected `/bin/sh -c cat`, found %s.", procs.Processes[0][10])
    }
    if procs.Processes[1][10] != "/bin/sh" && procs.Processes[1][10] != "cat" {
        t.Fatalf("Expected `cat` or `/bin/sh`, found %s.", procs.Processes[1][10])
    if procs.Processes[1][10] != "/bin/sh -c cat" {
        t.Fatalf("Expected `/bin/sh -c cat`, found %s.", procs.Processes[1][10])
    }
}
@@ -132,6 +132,23 @@ run [ "$(cat /e)" = "blah" ]
        [][2]string{{"/x", "hello"}, {"/", "blah"}},
    },

    // Comments, shebangs, and executability, oh my!
    {
        `
FROM {IMAGE}
# This is an ordinary comment.
RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh
RUN [ ! -x /hello.sh ]
RUN chmod +x /hello.sh
RUN [ -x /hello.sh ]
RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ]
RUN [ "$(/hello.sh)" = "hello world" ]
`,
        nil,
        nil,
    },

    // Environment variable
    {
        `
from {IMAGE}
@@ -142,6 +159,19 @@ run [ "$FOO" = "BAR" ]
        nil,
    },

    // Environment overwriting
    {
        `
from {IMAGE}
env FOO BAR
run [ "$FOO" = "BAR" ]
env FOO BAZ
run [ "$FOO" = "BAZ" ]
`,
        nil,
        nil,
    },

    {
        `
from {IMAGE}
@@ -391,6 +421,8 @@ func TestBuildEntrypoint(t *testing.T) {
    }

    if img.Config.Entrypoint[0] != "/bin/echo" {
        t.Log(img.Config.Entrypoint[0])
        t.Fail()
    }
}
@@ -425,61 +457,193 @@ func TestBuildEntrypointRunCleanup(t *testing.T) {
    }
}

func TestBuildImageWithCache(t *testing.T) {
func checkCacheBehavior(t *testing.T, template testContextTemplate, expectHit bool) (imageId string) {
    eng := NewTestEngine(t)
    defer nuke(mkRuntimeFromEngine(eng, t))

    template := testContextTemplate{`
from {IMAGE}
maintainer dockerio
`,
        nil, nil}

    img, err := buildImage(template, t, eng, true)
    if err != nil {
        t.Fatal(err)
    }

    imageId := img.ID
    imageId = img.ID

    img = nil
    img, err = buildImage(template, t, eng, true)
    img, err = buildImage(template, t, eng, expectHit)
    if err != nil {
        t.Fatal(err)
    }

    if imageId != img.ID {
        t.Logf("Image ids should match: %s != %s", imageId, img.ID)
        t.Fail()
    if hit := imageId == img.ID; hit != expectHit {
        t.Fatalf("Cache misbehavior, got hit=%t, expected hit=%t: (first: %s, second %s)", hit, expectHit, imageId, img.ID)
    }
    return
}

func checkCacheBehaviorFromEngine(t *testing.T, template testContextTemplate, expectHit bool, eng *engine.Engine) (imageId string) {
    img, err := buildImage(template, t, eng, true)
    if err != nil {
        t.Fatal(err)
    }

    imageId = img.ID

    img, err = buildImage(template, t, eng, expectHit)
    if err != nil {
        t.Fatal(err)
    }

    if hit := imageId == img.ID; hit != expectHit {
        t.Fatalf("Cache misbehavior, got hit=%t, expected hit=%t: (first: %s, second %s)", hit, expectHit, imageId, img.ID)
    }
    return
}

func TestBuildImageWithCache(t *testing.T) {
    template := testContextTemplate{`
from {IMAGE}
maintainer dockerio
`,
        nil, nil}
    checkCacheBehavior(t, template, true)
}

func TestBuildImageWithoutCache(t *testing.T) {
    eng := NewTestEngine(t)
    defer nuke(mkRuntimeFromEngine(eng, t))

    template := testContextTemplate{`
from {IMAGE}
maintainer dockerio
`,
        nil, nil}
    checkCacheBehavior(t, template, false)
}

    img, err := buildImage(template, t, eng, true)
    if err != nil {
        t.Fatal(err)
func TestBuildADDLocalFileWithCache(t *testing.T) {
    template := testContextTemplate{`
from {IMAGE}
maintainer dockerio
run echo "first"
add foo /usr/lib/bla/bar
run [ "$(cat /usr/lib/bla/bar)" = "hello" ]
run echo "second"
add . /src/
run [ "$(cat /src/foo)" = "hello" ]
`,
        [][2]string{
            {"foo", "hello"},
        },
        nil}
    eng := NewTestEngine(t)
    defer nuke(mkRuntimeFromEngine(eng, t))

    id1 := checkCacheBehaviorFromEngine(t, template, true, eng)
    template.files = append(template.files, [2]string{"bar", "hello2"})
    id2 := checkCacheBehaviorFromEngine(t, template, true, eng)
    if id1 == id2 {
        t.Fatal("The cache should have been invalidated but hasn't.")
    }
    imageId := img.ID

    img = nil
    img, err = buildImage(template, t, eng, false)
    if err != nil {
        t.Fatal(err)
    id3 := checkCacheBehaviorFromEngine(t, template, true, eng)
    if id2 != id3 {
        t.Fatal("The cache should have been used but hasn't.")
    }
    template.files[1][1] = "hello3"
    id4 := checkCacheBehaviorFromEngine(t, template, true, eng)
    if id3 == id4 {
        t.Fatal("The cache should have been invalidated but hasn't.")
    }
    template.dockerfile += `
add ./bar /src2/
run ls /src2/bar
`
    id5 := checkCacheBehaviorFromEngine(t, template, true, eng)
    if id4 == id5 {
        t.Fatal("The cache should have been invalidated but hasn't.")
    }
    template.files[1][1] = "hello4"
    id6 := checkCacheBehaviorFromEngine(t, template, true, eng)
    if id5 == id6 {
        t.Fatal("The cache should have been invalidated but hasn't.")
    }

    if imageId == img.ID {
        t.Logf("Image ids should not match: %s == %s", imageId, img.ID)
        t.Fail()
    template.dockerfile += `
add bar /src2/bar2
add /bar /src2/bar3
run ls /src2/bar2 /src2/bar3
`
    id7 := checkCacheBehaviorFromEngine(t, template, true, eng)
    if id6 == id7 {
        t.Fatal("The cache should have been invalidated but hasn't.")
    }
    template.files[1][1] = "hello5"
    id8 := checkCacheBehaviorFromEngine(t, template, true, eng)
    if id7 == id8 {
        t.Fatal("The cache should have been invalidated but hasn't.")
    }
}
func TestBuildADDLocalFileWithoutCache(t *testing.T) {
    template := testContextTemplate{`
from {IMAGE}
maintainer dockerio
run echo "first"
add foo /usr/lib/bla/bar
run echo "second"
`,
        [][2]string{{"foo", "hello"}},
        nil}
    checkCacheBehavior(t, template, false)
}

func TestBuildADDRemoteFileWithCache(t *testing.T) {
    template := testContextTemplate{`
from {IMAGE}
maintainer dockerio
run echo "first"
add http://{SERVERADDR}/baz /usr/lib/baz/quux
run echo "second"
`,
        nil,
        [][2]string{{"/baz", "world!"}}}
    checkCacheBehavior(t, template, true)
}

func TestBuildADDRemoteFileWithoutCache(t *testing.T) {
    template := testContextTemplate{`
from {IMAGE}
maintainer dockerio
run echo "first"
add http://{SERVERADDR}/baz /usr/lib/baz/quux
run echo "second"
`,
        nil,
        [][2]string{{"/baz", "world!"}}}
    checkCacheBehavior(t, template, false)
}

func TestBuildADDLocalAndRemoteFilesWithCache(t *testing.T) {
    template := testContextTemplate{`
from {IMAGE}
maintainer dockerio
run echo "first"
add foo /usr/lib/bla/bar
add http://{SERVERADDR}/baz /usr/lib/baz/quux
run echo "second"
`,
        [][2]string{{"foo", "hello"}},
        [][2]string{{"/baz", "world!"}}}
    checkCacheBehavior(t, template, true)
}

func TestBuildADDLocalAndRemoteFilesWithoutCache(t *testing.T) {
    template := testContextTemplate{`
from {IMAGE}
maintainer dockerio
run echo "first"
add foo /usr/lib/bla/bar
add http://{SERVERADDR}/baz /usr/lib/baz/quux
run echo "second"
`,
        [][2]string{{"foo", "hello"}},
        [][2]string{{"/baz", "world!"}}}
    checkCacheBehavior(t, template, false)
}

func TestForbiddenContextPath(t *testing.T) {
@@ -630,3 +794,11 @@ func TestBuildFails(t *testing.T) {
        t.Fatalf("StatusCode %d unexpected, should be 23", sterr.Code)
    }
}

func TestBuildFailsDockerfileEmpty(t *testing.T) {
    _, err := buildImage(testContextTemplate{``, nil, nil}, t, nil, true)

    if err != docker.ErrDockerfileEmpty {
        t.Fatalf("Expected: %v, got: %v", docker.ErrDockerfileEmpty, err)
    }
}
@@ -5,7 +5,7 @@ import (
    "fmt"
    "github.com/dotcloud/docker"
    "github.com/dotcloud/docker/engine"
    "github.com/dotcloud/docker/term"
    "github.com/dotcloud/docker/pkg/term"
    "github.com/dotcloud/docker/utils"
    "io"
    "io/ioutil"
@@ -968,3 +968,66 @@ func TestRunCidFile(t *testing.T) {
    })

}
func TestContainerOrphaning(t *testing.T) {

    // setup a temporary directory
    tmpDir, err := ioutil.TempDir("", "project")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(tmpDir)

    // setup a CLI and server
    cli := docker.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr)
    defer cleanup(globalEngine, t)
    srv := mkServerFromEngine(globalEngine, t)

    // closure to build something
    buildSomething := func(template string, image string) string {
        dockerfile := path.Join(tmpDir, "Dockerfile")
        replacer := strings.NewReplacer("{IMAGE}", unitTestImageID)
        contents := replacer.Replace(template)
        ioutil.WriteFile(dockerfile, []byte(contents), 0x777)
        if err := cli.CmdBuild("-t", image, tmpDir); err != nil {
            t.Fatal(err)
        }
        img, err := srv.ImageInspect(image)
        if err != nil {
            t.Fatal(err)
        }
        return img.ID
    }

    // build an image
    imageName := "orphan-test"
    template1 := `
    from {IMAGE}
    cmd ["/bin/echo", "holla"]
    `
    img1 := buildSomething(template1, imageName)

    // create a container using the first image
    if err := cli.CmdRun(imageName); err != nil {
        t.Fatal(err)
    }

    // build a new image that splits lineage
    template2 := `
    from {IMAGE}
    cmd ["/bin/echo", "holla"]
    expose 22
    `
    buildSomething(template2, imageName)

    // remove the second image by name
    resp, err := srv.ImageDelete(imageName, true)

    // see if we deleted the first image (and orphaned the container)
    for _, i := range resp {
        if img1 == i.Deleted {
            t.Fatal("Orphaned image with container")
        }
    }

}
Some files were not shown because too many files have changed in this diff.