Compare commits

643 Commits

Author SHA1 Message Date
Guillaume J. Charmes
28b162eeb4 Merge pull request #3233 from crosbymichael/bump_0.7.2
Bump to 0.7.2
2013-12-16 15:06:42 -08:00
Michael Crosby
e960152a1e Bump to v0.7.2 2013-12-16 14:50:07 -08:00
Guillaume J. Charmes
47375ddf54 Merge pull request #3230 from crosbymichael/allow-untag
Allow untag operations with no container validation
2013-12-16 14:34:56 -08:00
Michael Crosby
f0d6a91a1b Merge pull request #3217 from SvenDowideit/deal-with-changing-paths-for-lxc-start
lxc-start-unconfined softlink can go bad
2013-12-16 13:38:03 -08:00
Michael Crosby
62213ee314 Allow untag operations with no container validation 2013-12-16 13:29:43 -08:00
Guillaume J. Charmes
41d972baf1 Merge pull request #3219 from unclejack/vagrant_fix_version_check
install vbox guest additions if the latest aren't already installed
2013-12-16 12:49:32 -08:00
Guillaume J. Charmes
b3ad330782 Merge pull request #3099 from vieux/fix_pull_build
added authConfig to docker build
2013-12-16 10:53:10 -08:00
Tianon Gravi
606cacdca0 Merge pull request #3222 from gurjeet/zfs_driver_owner
Update readme to mark ZFS driver as Alpha quality.
2013-12-15 07:09:00 -08:00
Gurjeet Singh
d526038503 Update readme to mark ZFS driver as Alpha quality. 2013-12-15 09:17:16 -05:00
unclejack
12fb508262 install vbox guest additions if not latest 2013-12-14 16:00:52 +02:00
Sven Dowideit
0a3eedd4c9 when sharing a /var/lib/docker dir with more than one distribution, an existing lxc-start-unconfined softlink may point to a non-existent path; following that link (as Stat does) will cause the daemon to fail to start 2013-12-14 15:29:08 +10:00
Guillaume J. Charmes
a6928e70ac Merge pull request #3197 from ajhager/3138-names
Validate container names on creation. Fixes #3138
2013-12-13 17:28:36 -08:00
Guillaume J. Charmes
20197385b2 Merge pull request #3173 from vieux/docker_info_job
Move info to job
2013-12-13 17:27:59 -08:00
Victor Vieux
85b9338205 add GetenvInt64 and SetenvInt64 2013-12-13 16:29:22 -08:00
Victor Vieux
51e2c1794b move docker info to the job api 2013-12-13 16:15:15 -08:00
Guillaume J. Charmes
20899cdb34 Merge pull request #3183 from vieux/job_commit
Move commit to job
2013-12-13 16:11:58 -08:00
Guillaume J. Charmes
f5ab2516d8 Merge pull request #2897 from crosbymichael/aufs-42
Increase max image depth to 127
2013-12-13 16:03:57 -08:00
Victor Vieux
d5f5ecb658 improve GetenvJson 2013-12-13 16:02:19 -08:00
Victor Vieux
4b5ceb0f24 use args 2013-12-13 14:29:27 -08:00
Andy Rothfusz
906b481148 Merge pull request #3213 from metalivedev/1695-dockerlogs
Add more information about Docker logging
2013-12-13 14:29:14 -08:00
Victor Vieux
930ec9f52c move commit to job 2013-12-13 14:19:56 -08:00
Guillaume J. Charmes
aaa1c48d24 Merge pull request #3175 from vieux/engine-job-stop
Move stop to job
2013-12-13 14:15:58 -08:00
Victor Vieux
d7123a597f Merge pull request #3214 from dotcloud/shykes_maintainer
Temporarily remove @shykes from engine/MAINTAINERS
2013-12-13 14:03:08 -08:00
Guillaume J. Charmes
9a9ecda7c8 Merge pull request #3208 from WarheadsSE/bridgeip
Add -bip flag: allow specification of dynamic bridge IP via CIDR
2013-12-13 13:56:35 -08:00
Guillaume J. Charmes
071338172c Merge pull request #3187 from vieux/resize_job
Move resize to job
2013-12-13 13:55:23 -08:00
Victor Vieux
4975c1b549 Temporarily remove @shykes from engine/MAINTAINERS 2013-12-13 13:51:20 -08:00
Victor Vieux
73e8a39ff2 move resize to job 2013-12-13 13:15:39 -08:00
Victor Vieux
847cf5b599 Merge branch 'master' of https://github.com/dotcloud/docker 2013-12-13 13:15:22 -08:00
Michael Crosby
bf91636558 Merge pull request #3210 from rsampaio/fix_bridge_creation_3141
Bridge creation when ipv6 is not enabled
2013-12-13 12:03:55 -08:00
Andy Rothfusz
1e85aabf71 Fix #1695 by adding more about logging. 2013-12-13 11:42:58 -08:00
Michael Crosby
4fe0a9b6a0 Merge pull request #3211 from tianon/hack-make-cover
Add new cover bundlescript for giving a nice report across all the coverprofiles
2013-12-13 11:17:03 -08:00
Joseph Hager
f63cdf0260 Validate container names on creation. Fixes #3138
Move valid container name regex to the top of the file

Added hyphen as a valid rune in container names.

Remove group in valid container name regex.
2013-12-13 14:14:05 -05:00
Victor Vieux
9fb1ba97b1 Merge branch 'master' of https://github.com/dotcloud/docker 2013-12-13 11:06:20 -08:00
Tianon Gravi
59dc2876a7 Add new cover bundlescript for giving a nice report across all the coverprofiles generated by the test scripts 2013-12-13 11:59:54 -07:00
Tianon Gravi
23ab0af2ff Merge pull request #3132 from tianon/hack-separate-integration
Separate Integration Tests
2013-12-13 10:55:49 -08:00
Victor Vieux
b8a16b3459 Merge pull request #3194 from tianon/tianon-hack-maintainer
Make Tianon the hack maintainer
2013-12-13 10:55:07 -08:00
Rodrigo Vaz
a530b8d981 fix #3141 Bridge creation when ipv6 is not enabled 2013-12-13 16:39:49 -02:00
Victor Vieux
89beb55c32 Merge branch 'master' of https://github.com/dotcloud/docker 2013-12-13 10:38:26 -08:00
Victor Vieux
f9328ad9cc Merge pull request #3201 from jpoimboe/libvirt-prereq-network
Set hostname and IP address from dockerinit
2013-12-13 10:38:17 -08:00
Victor Vieux
20759c3ef7 Merge branch 'libvirt-prereq-network' of https://github.com/jpoimboe/docker 2013-12-13 10:34:09 -08:00
Victor Vieux
5d81776714 Merge pull request #3202 from jpoimboe/libvirt-prereq-env
dockerinit: propagate "container" env variable from lxc
2013-12-13 10:32:17 -08:00
Tianon Gravi
0ef1ff91cb Merge pull request #3151 from tianon/more-debootstrap-tweaks
Update mkimage-debootstrap with even more tweaks for keeping images tiny...
2013-12-13 09:28:11 -08:00
WarheadsSE
a68d7f3d70 Add -bip flag: allow specification of dynamic bridge IP via CIDR
e.g.:

```
docker -d -bip "10.10.0.1/16"
```

If set and valid, the provided CIDR is used in place of trial and error from the pre-defined array in network.go.
Mutually exclusive with the -b option.
2013-12-13 10:47:19 -05:00
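
A minimal sketch of how a CIDR flag like this can be validated with the Go standard library; the flag wiring below is illustrative, not the actual network.go code:

```
package main

import (
	"flag"
	"fmt"
	"net"
)

func main() {
	// Hypothetical stand-in for the daemon's -bip flag.
	bip := flag.String("bip", "", `bridge IP in CIDR notation, e.g. "10.10.0.1/16"`)
	flag.Parse()

	if *bip == "" {
		return // fall back to the pre-defined candidate networks
	}
	ip, ipNet, err := net.ParseCIDR(*bip)
	if err != nil {
		fmt.Println("invalid -bip:", err)
		return
	}
	// Valid: use the given address/network instead of trial and error.
	fmt.Printf("using bridge IP %s on network %s\n", ip, ipNet)
}
```
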
Tianon Gravi
5a89c6f6df Merge pull request #3192 from unclejack/update_virtualbox_guest_additions
vagrant: update & verify virtualbox guest tools
2013-12-12 21:22:29 -08:00
Josh Poimboeuf
e877294321 dockerinit: propagate "container" env variable from lxc
Lxc (and libvirt) already set the "container" env variable
appropriately[1], so just use that.

[1] http://www.freedesktop.org/wiki/Software/systemd/ContainerInterface/
2013-12-12 20:08:58 -06:00
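
The idea, sketched minimally (illustrative code, not the actual dockerinit source): since the runtime already exports the variable, dockerinit only has to read it and pass it along.

```
package main

import (
	"fmt"
	"os"
)

func main() {
	// lxc and libvirt-lxc both set "container" inside the container
	// (see the ContainerInterface spec referenced above).
	if v := os.Getenv("container"); v != "" {
		fmt.Println("container runtime:", v)
	} else {
		fmt.Println("not running inside a container")
	}
}
```
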
Josh Poimboeuf
ecc51cd465 dockerinit: set IP address
Set the IP address in dockerinit instead of lxc utils, to prepare for
using libvirt-lxc.
2013-12-12 19:57:11 -06:00
Josh Poimboeuf
f7c7f7978c dockerinit: set hostname
Set the hostname in dockerinit instead of with lxc utils.  libvirt-lxc
doesn't have a way to do this, so do it in a common place.
2013-12-12 19:56:05 -06:00
Michael Crosby
8224e13bd2 Merge pull request #3185 from vieux/job_tag
Move tag to job
2013-12-12 17:02:39 -08:00
Guillaume J. Charmes
912bf8ff92 Merge pull request #3015 from jpoimboe/dockerinit-libvirt-prereqs
dockerinit: drop capabilities
2013-12-12 15:49:21 -08:00
Victor Vieux
e43ff2f6f2 move tag to job 2013-12-12 11:52:11 -08:00
Josh Poimboeuf
b8f1c73705 dockerinit: drop capabilities
Drop capabilities in dockerinit instead of with lxc utils, since
libvirt-lxc doesn't support it.

This will also be needed for machine container mode, since dockerinit
needs CAP_SYS_ADMIN to setup /dev/console correctly.
2013-12-12 13:47:24 -06:00
Josh Poimboeuf
1572989201 dockerinit: refactor error handling 2013-12-12 13:47:24 -06:00
Josh Poimboeuf
bd02d6e662 dockerinit: put args in a struct 2013-12-12 13:47:23 -06:00
Andy Rothfusz
2d1f61ef0e Merge pull request #3190 from zain/master
Small typo fixes
2013-12-12 11:25:11 -08:00
Andy Rothfusz
54df95f26c Merge pull request #3189 from aknikitin/patch-1
Minor spelling fix
2013-12-12 11:24:45 -08:00
Guillaume J. Charmes
5b33ae5971 Merge pull request #3145 from vieux/fix_docker_images
multiple fixes in docker images
2013-12-12 11:17:19 -08:00
Tianon Gravi
0db1c60542 Make Tianon the hack maintainer 2013-12-12 11:25:30 -07:00
unclejack
f216448c82 vagrant: update & verify virtualbox guest tools 2013-12-12 13:03:33 +02:00
Zain Memon
f26a9d456c Small typo fixes 2013-12-12 01:23:16 -08:00
Anton Nikitin
bf5b949ffc Minor spelling fix 2013-12-12 01:09:24 -05:00
Victor Vieux
621523a041 Merge pull request #3184 from creack/fix-volumes-on-host
Fix volumes on host
2013-12-11 18:06:25 -08:00
Guillaume J. Charmes
8fd9633a6b Improve FollowLink to handle recursive links and be more strict 2013-12-11 17:19:02 -08:00
Victor Vieux
1124261158 Merge pull request #3144 from codeaholics/643-stale-nfs-handle
Prevent deletion of image if ANY container is depending on it; not just running containers
2013-12-11 17:18:54 -08:00
Andy Rothfusz
b722f809e7 Merge pull request #3181 from lsm5/rhel-docs-typos
Rhel docs typos
2013-12-11 16:37:24 -08:00
Michael Crosby
f396c42cad Fix volumes on the host by following symlinks in a scope 2013-12-11 16:31:02 -08:00
Lokesh Mandvekar
8874f2aef9 keeping rhel page sorta in sync with fedora
Signed-off-by: Lokesh Mandvekar <lsm5@redhat.com>
2013-12-11 18:26:17 -06:00
Lokesh Mandvekar
e8ec3dba7b remove step numbers, keep consistent with fedora
Signed-off-by: Lokesh Mandvekar <lsm5@redhat.com>
2013-12-11 18:21:52 -06:00
Andy Rothfusz
4eda2a54de Merge pull request #3177 from tianon/fix-turnbull-github
Fix James's github handle in docs/MAINTAINERS
2013-12-11 16:00:06 -08:00
Andy Rothfusz
d3292078dc Merge pull request #3176 from lsm5/rhel-docs
Rhel docs
2013-12-11 15:58:42 -08:00
Victor Vieux
6ba456ff87 move t from arg to env 2013-12-11 15:36:50 -08:00
Lokesh Mandvekar
44984602c7 more typo corrections
Signed-off-by: Lokesh Mandvekar <lsm5@redhat.com>
2013-12-11 16:36:14 -06:00
Lokesh Mandvekar
d534e1c3a1 some typo corrections
Signed-off-by: Lokesh Mandvekar <lsm5@redhat.com>
2013-12-11 16:20:54 -06:00
Andy Rothfusz
d56d8ab96e Merge pull request #3174 from richo/features/https_install_script
Use https to get the install script
2013-12-11 14:09:22 -08:00
Andy Rothfusz
6cf8ec606e Merge pull request #3161 from SvenDowideit/make-replace-docker-binary-note-more-obvious
associate swapping the built docker binary with building the binary, rather than as a note in building the docs
2013-12-11 14:04:34 -08:00
Lokesh Mandvekar
db3019d50b rhel page keywords update
Signed-off-by: Lokesh Mandvekar <lsm5@redhat.com>
2013-12-11 16:00:46 -06:00
Lokesh Mandvekar
42c38bf34d rhel description update
Signed-off-by: Lokesh Mandvekar <lsm5@redhat.com>
2013-12-11 15:59:35 -06:00
Andy Rothfusz
11b3fbb3bd Merge pull request #3167 from qbrossard/patch-1
Corrected typo (resdis -> redis)
2013-12-11 13:57:59 -08:00
Andy Rothfusz
036f41fde3 Merge pull request #3165 from SvenDowideit/cmd-rmi-example
add example for docker rmi, and explain the need to remove all references (tags) to an image before it's garbage collected :)
2013-12-11 13:57:13 -08:00
Andy Rothfusz
6e9c1590c6 Merge pull request #3162 from SvenDowideit/docker-commit-example-change-CMD
add a direct example for changing the cmd that is run
2013-12-11 13:52:12 -08:00
Tianon Gravi
39cc8a32b1 Fix James's github handle in docs/MAINTAINERS 2013-12-11 14:13:55 -07:00
Lokesh Mandvekar
31961ccd94 rhel page only for rhel
Signed-off-by: Lokesh Mandvekar <lsm5@redhat.com>
2013-12-11 14:36:12 -06:00
Lokesh Mandvekar
eec48f93a3 rhel docs update
Signed-off-by: Lokesh Mandvekar <lsm5@redhat.com>
2013-12-11 14:34:51 -06:00
Solomon Hykes
dbe1915fee Engine: new command 'stop' gracefully stops a container. 2013-12-11 11:52:59 -08:00
Solomon Hykes
bef8de9319 Engine: integer job status, improved stream API
* Jobs return an integer status instead of a string
* Status convention mimics unix process execution: 0=success, 1=generic error, 127="no such command"
* Stdout and Stderr support multiple thread-safe data receivers and ring buffer filtering
2013-12-11 11:52:59 -08:00
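
A toy sketch of that status convention (names and types are illustrative, not the engine's actual API):

```
package main

import "fmt"

// Illustrative status convention from the commit message above.
const (
	StatusOK       = 0   // success
	StatusErr      = 1   // generic error
	StatusNotFound = 127 // "no such command", as in a shell
)

func runJob(name string) int {
	jobs := map[string]func() int{
		"ping": func() int { return StatusOK },
	}
	job, ok := jobs[name]
	if !ok {
		return StatusNotFound
	}
	return job()
}

func main() {
	fmt.Println(runJob("ping"))    // 0
	fmt.Println(runJob("missing")) // 127
}
```
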
Richo Healey
81fc368a6d Use https to get the install script 2013-12-11 11:27:36 -08:00
Michael Crosby
bd292759f0 Merge pull request #3153 from vieux/improve_docker_push_display
Update docker push to use new display
2013-12-11 11:11:53 -08:00
Michael Crosby
5fd3c8204d Merge pull request #2735 from shykes/engine-job-kill
New engine command: 'kill'
2013-12-11 10:35:57 -08:00
Quentin Brossard
af21908493 Corrected typo (resdis -> redis) 2013-12-11 13:15:27 +01:00
Sven Dowideit
7edd1f6bad add example for docker rmi, and explain the need to remove all references (tags) to an image before it's garbage collected :) 2013-12-11 15:54:34 +10:00
Sven Dowideit
d878632b25 add a direct example for changing the cmd that is run 2013-12-11 12:07:07 +10:00
Sven Dowideit
be13735001 associate swapping the built docker binary with building the binary, rather than as a note in building the docs 2013-12-11 11:12:11 +10:00
Andy Rothfusz
fb9ddc5de5 Merge pull request #3159 from SvenDowideit/make-docs-consistency
Makefile: make docs is more consistent
2013-12-10 16:51:35 -08:00
Sven Dowideit
27646c4459 make docs is more consistent 2013-12-11 10:14:56 +10:00
Victor Vieux
b98d51dddb revert 'firstErr' 2013-12-10 15:37:03 -08:00
Victor Vieux
0025e9bd71 Merge pull request #3113 from shykes/engine-export
Move 'docker export' to the engine API
2013-12-10 13:28:24 -08:00
Victor Vieux
4c6e528f13 Merge pull request #3152 from daniel-garcia/3129_dont-open-bindmounted-files
don't open bind-mounted files/dirs to get Stat, use os.Lstat
2013-12-10 11:05:39 -08:00
Victor Vieux
95f061b408 update docker push to use [====> ] 2013-12-10 10:57:16 -08:00
Daniel Garcia
761184df52 don't open bind-mounted files/dirs to get Stat, use os.Lstat 2013-12-10 12:49:53 -06:00
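
The standard-library distinction behind this fix (and the lxc-start-unconfined softlink fix above): opening a path or calling os.Stat follows symlinks and touches the target, while os.Lstat examines the path itself. A minimal illustration:

```
package main

import (
	"fmt"
	"os"
)

func main() {
	path := "/tmp/dangling-link" // example: a symlink whose target is gone
	_ = os.Symlink("/nonexistent-target", path)
	defer os.Remove(path)

	if _, err := os.Stat(path); err != nil {
		fmt.Println("Stat follows the link and fails:", err)
	}
	if fi, err := os.Lstat(path); err == nil {
		fmt.Println("Lstat sees the link itself:", fi.Mode())
	}
}
```
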
Tianon Gravi
78b85220be Update mkimage-debootstrap with even more tweaks for keeping images tiny by more aggressively removing cache files and by not downloading apt-cache Translations files 2013-12-10 10:59:32 -07:00
Andy Rothfusz
8814c11b14 Merge pull request #3103 from metalivedev/1229-titleactions
Update "Use" titles to be action-oriented
2013-12-09 18:57:48 -08:00
Victor Vieux
09d2c2351c Merge pull request #3119 from shykes/engine-version
Port 'docker version' to the engine API
2013-12-09 17:35:44 -08:00
Victor Vieux
c618a906a4 fix size in -tree 2013-12-09 17:27:05 -08:00
Andy Rothfusz
9c1e9a5157 Fix #1229. Update titles, fix some wrapping.
Make the Ambassador container explicit.
Apply Sven's suggestions.
2013-12-09 17:23:56 -08:00
Andy Rothfusz
0b0b0ca0f9 Merge pull request #3146 from jamtur01/linkedits
Some minor cleanup of the Links use document
2013-12-09 17:09:59 -08:00
Victor Vieux
ac1093b83a fix docker images -tree <invalid_image> and docker images -viz <image_name> 2013-12-09 16:58:17 -08:00
James Turnbull
c9cedb4c04 Some minor cleanup of the Links use document 2013-12-09 16:47:19 -08:00
Victor Vieux
a74be95b23 Merge pull request #2843 from shykes/engine-job-wait
New engine job:wait
2013-12-09 16:36:23 -08:00
Victor Vieux
8291f00a0e refactor and fix tests 2013-12-09 16:25:19 -08:00
Andy Rothfusz
b7bc80cba9 Merge pull request #3109 from artagnon/arch-install
Update Arch installation doc
2013-12-09 15:52:54 -08:00
Andy Rothfusz
864729b96f Merge pull request #3104 from jamtur01/uses
Added section on Trusted Builds
2013-12-09 15:51:52 -08:00
Andy Rothfusz
a67571668e Merge pull request #3143 from idupree/patch-1
Make it extra clear that the `docker` group is root-equivalent.
2013-12-09 15:48:05 -08:00
Danny Yates
776bb43c9e Prevent deletion of image if ANY container is depending on it; not just running containers 2013-12-09 20:46:21 +00:00
Michael Crosby
75bd5bea70 Merge pull request #3114 from shykes/hack-stats
Hack: stats.sh prints useful project stats for maintainers
2013-12-09 10:36:17 -08:00
Isaac Dupree
e2ee5c71fc Make it extra clear that the docker group is root-equivalent. 2013-12-09 12:25:20 -05:00
Tianon Gravi
f0879a1e14 Add separate "test-integration" bundlescript (and corresponding dyntest-integration bundlescript) 2013-12-08 18:43:24 -07:00
Tianon Gravi
ca405786f4 Unify dyntest/test and dynbinary/binary hack bundlescripts further by cross-invocation and keeping all the logic in one place, taking advantage of LDFLAGS_STATIC that is the only bit that gets replaced for dyntest/dynbinary 2013-12-08 18:40:05 -07:00
Michael Crosby
cdc07f7d5c Merge pull request #3126 from unclejack/remove_vendored_tar
Remove vendored dotcloud/tar
2013-12-08 16:51:52 -08:00
Tianon Gravi
45cea94a82 Unify hack/make/*test further by invoking hack/make/test directly from dyntest 2013-12-08 15:34:08 -07:00
unclejack
8ec96c9605 remove vendored dotcloud/tar
The tar dependency has been removed. It's
time to remove the vendored tar as well.
2013-12-09 00:02:13 +02:00
James Turnbull
c094807a1b Added section on Trusted Builds 2013-12-08 15:54:12 -05:00
Tianon Gravi
bac3a8e6f5 Add much better pruning of non-tested directories, including pruning the integration tests directory (doing more with "find" and nothing with "grep") 2013-12-08 13:50:48 -07:00
Tianon Gravi
dcfc4ada4d Clean output and simplify hack/make/*test by adding go_test_dir function in make.sh 2013-12-08 13:49:57 -07:00
Tianon Gravi
416b16e1e2 Simplify and resync hack/make/test and hack/make/dyntest output handling 2013-12-08 12:57:11 -07:00
Ramkumar Ramachandra
f832b76bdf archlinux installation doc: correct some details
1. The AUR package is called docker-git, not lxc-docker-git.

2. According to the official community package, docker depends on
   sqlite.

3. 02ef8ec (Update archlinux.rst as packages have changed, 2013-12-06)
   updated the installation instructions, but left behind residual
   wording about the AUR package not being officially supported; the
   community repository is officially supported.

Signed-off-by: Ramkumar Ramachandra <artagnon@gmail.com>
2013-12-08 15:36:02 +05:30
Solomon Hykes
d502f0cfac Merge pull request #3118 from shykes/engine-structured-output
Engine: jobs can send structured output as json on stdout
2013-12-07 23:46:50 -08:00
Solomon Hykes
16fad96007 Merge pull request #3117 from shykes/engine-refactor-env
Engine: break out Env utilities into their own type - Env
2013-12-07 23:45:00 -08:00
Solomon Hykes
de35b346d1 Port 'docker version' to the engine API 2013-12-08 07:41:53 +00:00
Solomon Hykes
869a11bc93 Cleanup version introspection
* Unify version checking code into version.go
* Make 'version' available as a job in the engine
* Use simplified version checking code when setting user agent for registry client.
2013-12-08 07:35:24 +00:00
Solomon Hykes
f806818154 Engine: convenience http transport for simple remote job execution 2013-12-08 07:33:23 +00:00
Solomon Hykes
a7a171b6c2 Engine: Output.AddEnv decodes structured data from the standard output of a job 2013-12-08 06:16:10 +00:00
Solomon Hykes
a80c059bae Engine: break out Env utilities into their own type - Env 2013-12-08 06:06:05 +00:00
Solomon Hykes
edace08327 Hack: stats.sh prints useful project stats for maintainers 2013-12-08 01:47:03 +00:00
Solomon Hykes
9656cdf0c2 Engine: 'export' returns a raw archive of a container's filesystem 2013-12-08 01:33:37 +00:00
Solomon Hykes
50f3a696bd Engine: don't log job stdout to engine stdout (it might be non-text output, for example tar data for 'export') 2013-12-08 01:33:05 +00:00
Solomon Hykes
f4676f0ffa Merge pull request #3101 from creack/merge_release
Merge release
2013-12-06 18:09:38 -08:00
Guillaume J. Charmes
3c1f3be032 Update version 2013-12-06 17:31:09 -08:00
Guillaume J. Charmes
aeba4e6482 Merge remote-tracking branch 'origin/release' into merge_release 2013-12-06 17:30:52 -08:00
Solomon Hykes
3569d080af New engine command: 'wait' 2013-12-06 23:05:21 +00:00
Solomon Hykes
427bdb60e7 Engine: port 'kill' to the new integer status. 2013-12-06 23:02:27 +00:00
Solomon Hykes
9b1930c5a0 gofmt 2013-12-06 23:02:27 +00:00
Solomon Hykes
2546a2c645 Hack: use new 'kill' command in integration tests 2013-12-06 23:02:27 +00:00
Solomon Hykes
fdb3de7b11 Engine: new command 'kill' sends a signal to a running container 2013-12-06 23:02:27 +00:00
Guillaume J. Charmes
88df052197 Merge pull request #3098 from creack/bump_v0.7.1
Bump v0.7.1
2013-12-06 14:42:14 -08:00
Guillaume J. Charmes
04ffa53ba8 Merge pull request #3077 from jlhawn/3076-handle-inactive-user-login
Adjusted handling of inactive user login
2013-12-06 14:40:35 -08:00
Andy Rothfusz
07f7643bbc Merge pull request #3030 from jamtur01/versions
Fixed #2136 - Added styles
2013-12-06 14:27:53 -08:00
Victor Vieux
228091c79e added authConfig to docker build 2013-12-06 14:27:10 -08:00
Andy Rothfusz
6fa1463614 Merge pull request #3094 from tang0th/patch-1
Update archlinux.rst as packages have changed
2013-12-06 14:18:31 -08:00
Victor Vieux
f28445254f disable progressbar in non-terminal 2013-12-06 14:15:40 -08:00
Victor Vieux
0969be5ddb update doc 2013-12-06 14:15:40 -08:00
Victor Vieux
95c0ade04b fix jsonmessage in build 2013-12-06 14:15:40 -08:00
Tianon Gravi
e01732f857 Revert "Add cgroup-bin dependency to our Ubuntu package"
This reverts commit c81bb20f5b.

After re-reading the documentation: "The Recommends field should list packages that would be found together with this one in all but unusual installations."

Thus, "Recommends" is an acceptable place for this dep, and anyone disabling that gets to keep the pieces.

The main reason this needs to be reverted is that it breaks Debian completely, because "lxc" and "cgroup-bin" can't be installed concurrently.
2013-12-06 14:15:40 -08:00
Guillaume J. Charmes
9b644ff246 Merge pull request #3096 from dotcloud/fix_fix_jsonmessage
fix jsonmessage in build
2013-12-06 14:10:24 -08:00
Victor Vieux
2c646b2d46 disable progressbar in non-terminal 2013-12-06 14:09:27 -08:00
Victor Vieux
becb13dc26 update doc 2013-12-06 14:09:27 -08:00
Victor Vieux
05f416d869 fix jsonmessage in build 2013-12-06 14:09:27 -08:00
Andy Rothfusz
7fd64e0196 Merge pull request #3088 from SvenDowideit/start-cmdline-examples-with-dollar-for-easier-testing
change the policy wrt $ sudo docker to simplify auto-testing
2013-12-06 13:37:02 -08:00
Sven Dowideit
13da09d22b change the policy wrt $ sudo docker to simplify auto-testing 2013-12-07 07:23:53 +10:00
Josh Hawn
6720bfb243 Adjusted handling of inactive user login
The return status for inactive users was being checked
too early in the process, so I moved it from just after
the handling of POST /v1/users/ to after getting the
response from GET /v1/users/
2013-12-06 11:57:05 -08:00
Andy Rothfusz
d75fc6e529 Merge pull request #3071 from lsm5/fedora-docs-update
use mattdm/fedora in fedora doc and other cosmetic changes
2013-12-06 11:25:10 -08:00
Andy Rothfusz
4a148919c3 Merge pull request #3052 from shawnl/patch-1
nftables dependencies in kernel
2013-12-06 11:02:49 -08:00
Guillaume J. Charmes
c7d75588f4 Merge pull request #3079 from crosbymichael/give-engine-noop-tests
Enable engine to take Stderr and Stdout for mocking in tests
2013-12-06 10:43:39 -08:00
Guillaume J. Charmes
dfade9e2d8 Merge pull request #3095 from jpoimboe/missing-defines
devmapper: add missing defines
2013-12-06 10:31:42 -08:00
Guillaume J. Charmes
b655406faa Merge pull request #3085 from tianon/fix-cgroup-dep
Revert "Add cgroup-bin dependency to our Ubuntu package"
2013-12-06 09:15:41 -08:00
Josh Poimboeuf
a015f38f4a devmapper: add missing defines
Add some missing defines which are needed for compiling on older systems
like RHEL 6.
2013-12-06 10:13:47 -06:00
tang0th
02ef8ec3ca Update archlinux.rst as packages have changed
The docker package has been added to the Arch Linux community repo; this means that the package names and installation instructions have changed slightly.
2013-12-06 15:47:24 +00:00
Michael Crosby
25d3db048e Enable engine to take Stderr and Stdout for mocking in tests 2013-12-06 01:18:18 -08:00
Shawn Landden
a69bb25820 specific kernel config 2013-12-05 23:54:23 -08:00
Andy Rothfusz
5f5949f6a6 Merge pull request #3086 from metalivedev/3045-addmirrors
Add debian mirrors. Fixes #3045.
2013-12-05 18:16:28 -08:00
Andy Rothfusz
58b75f8f29 Add debian mirrors. Fixes #3045. 2013-12-05 18:08:56 -08:00
Tianon Gravi
aea7418d8a Revert "Add cgroup-bin dependency to our Ubuntu package"
This reverts commit c81bb20f5b.

After re-reading the documentation: "The Recommends field should list packages that would be found together with this one in all but unusual installations."

Thus, "Recommends" is an acceptable place for this dep, and anyone disabling that gets to keep the pieces.

The main reason this needs to be reverted is that it breaks Debian completely, because "lxc" and "cgroup-bin" can't be installed concurrently.
2013-12-05 19:03:47 -07:00
Andy Rothfusz
f9147effac Merge pull request #3069 from proppy/patch-1
docs/installation/google: add enabling Google Compute Engine step
2013-12-05 17:40:42 -08:00
Andy Rothfusz
0e2b0f284c Merge pull request #3001 from dotcloud/api_json
add docs for the new json format
2013-12-05 17:35:51 -08:00
Andy Rothfusz
80dfa23da8 Merge pull request #3051 from pariviere/2490-docs-network
Network documentation page
2013-12-05 17:29:54 -08:00
Guillaume J. Charmes
bc9b239d74 Merge pull request #3081 from creack/bump_0.7.1
Bump to 0.7.1
2013-12-05 17:19:06 -08:00
Andy Rothfusz
4bea68dfa6 Clean up quoting, wraps, and build error on code-block. 2013-12-05 17:16:31 -08:00
Andy Rothfusz
ea0ed9a915 Merge branch 'docker-run-prose-2149' of github.com:SvenDowideit/docker into 3036-test 2013-12-05 17:03:26 -08:00
Guillaume J. Charmes
e39d35deda Bump to 0.7.1 2013-12-05 16:49:39 -08:00
Guillaume J. Charmes
4acd579226 Merge pull request #3078 from crosbymichael/remove-name-if-not-exist
If the container does not exist, try to remove the name and continue
2013-12-05 16:41:43 -08:00
Victor Vieux
c764fb0c29 Merge pull request #3006 from tianon/ubuntu-cgroup-bin
Add cgroup-bin dependency to our Ubuntu package
2013-12-05 16:41:01 -08:00
Guillaume J. Charmes
de090116dd Merge branch 'master' into release
Conflicts:
	commands.go
2013-12-05 16:40:24 -08:00
Victor Vieux
7a87023587 Merge pull request #2955 from crosbymichael/search-name-first
Search for repo first before image id
2013-12-05 16:16:55 -08:00
Tianon Gravi
584164177e Merge pull request #2358 from tianon/mkimage-rinse
Remove mkimage-centos.sh in favor of a new rinse-based script...
2013-12-05 16:07:59 -08:00
Victor Vieux
35e80868ad Merge pull request #3004 from tianon/shebang-bash
Update bundlescript shebangs to be bash, reflecting how they're actually invoked
2013-12-05 16:04:09 -08:00
Tianon Gravi
2acea6090f Merge pull request #2983 from tianon/udev
Add udev rules files for hiding the docker loopback devices from udisks
2013-12-05 16:01:04 -08:00
Andy Rothfusz
81b25fde79 Merge pull request #3065 from SvenDowideit/use-makefile-in-devenv-documentation
use the Makefile in the dev environment documentation - it's way less typing, and fewer typing mistakes
2013-12-05 15:27:36 -08:00
Andy Rothfusz
0189a99471 Merge pull request #3062 from SvenDowideit/cli-examples-dollar-sudo-docker
Some examples didn't use $ sudo docker, so this makes it a little more consistent
2013-12-05 15:25:09 -08:00
Michael Crosby
7bf3a07371 If the container does not exist, try to remove the name and continue 2013-12-05 15:22:21 -08:00
Andy Rothfusz
9320f4e2d1 Merge pull request #3061 from shykes/sven-doc-maintainers
Add Sven Dowideit to docs maintainers
2013-12-05 15:17:34 -08:00
Guillaume J. Charmes
d1a4f83e5e Merge pull request #3075 from dotcloud/pull_layers
Wait on pull from another client
2013-12-05 14:47:39 -08:00
Victor Vieux
fb810b54ff wait on pull from another client 2013-12-05 14:41:56 -08:00
Pierre-Alain RIVIERE
eac95671f5 refs #2490 : add a network page to docs 2013-12-05 23:40:33 +01:00
Michael Crosby
06379d8bd9 Merge pull request #2992 from jpoimboe/bridge-create-ioctl
Create the bridge device with ioctl
2013-12-05 14:40:03 -08:00
Andy Rothfusz
a96bf74397 include Red Hat Enterprise; should have been part of #3011
oops, forgot the index!
2013-12-05 14:34:10 -08:00
Andy Rothfusz
cc0466bb68 Merge pull request #3011 from goldmann/rhel_instructions
Instructions on how to install Docker on Red Hat Enterprise Linux / CentOS
2013-12-05 14:30:31 -08:00
Andy Rothfusz
0a7e0f0819 Merge pull request #3034 from rogaha/fix_doc_registry_index_spec
- Updated the doc according to the current implementation status
2013-12-05 14:24:13 -08:00
Guillaume J. Charmes
ef157cee30 Merge pull request #3074 from crosbymichael/dm-history-calc
Save layersize on pull
2013-12-05 14:13:03 -08:00
Lokesh Mandvekar
7ab4f37d60 separate block for yum update docker
Signed-off-by: Lokesh Mandvekar <lsm5@redhat.com>
2013-12-05 16:11:33 -06:00
Lokesh Mandvekar
5d022f0445 add unofficial header back, yum update docker
Signed-off-by: Lokesh Mandvekar <lsm5@redhat.com>
2013-12-05 16:08:08 -06:00
Michael Crosby
697707e4af Save layersize on pull
Do not display both size and virtual size on the CLI; only display virtual size.
2013-12-05 14:03:23 -08:00
Andy Rothfusz
e3c3f3c324 Merge pull request #3010 from SvenDowideit/origin/docker-ps-linked-container-alias-example
add an example of docker ps, and also of link aliases
2013-12-05 14:00:53 -08:00
Guillaume J. Charmes
797bac2344 Merge pull request #3057 from crosbymichael/test-cpu-share
Move test cpu shares out of test start
2013-12-05 13:58:01 -08:00
Josh Poimboeuf
7a94cdf8ed create the bridge device with ioctl
On RHEL 6, creation of a bridge device with netlink fails.  Use the more
backward-compatible ioctl instead.  This fixes networking on RHEL 6.
2013-12-05 15:32:15 -06:00
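
A rough sketch of the ioctl approach, assuming Linux (SIOCBRADDBR is from linux/sockios.h; the helper name is ours, not the actual Docker code):

```
package main

import (
	"syscall"
	"unsafe"
)

const SIOCBRADDBR = 0x89a0 // from linux/sockios.h

func createBridge(name string) error {
	s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, 0)
	if err != nil {
		return err
	}
	defer syscall.Close(s)

	nameBytes, err := syscall.BytePtrFromString(name)
	if err != nil {
		return err
	}
	// The ioctl takes the bridge name directly, unlike the netlink
	// call that fails on RHEL 6 kernels.
	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s),
		SIOCBRADDBR, uintptr(unsafe.Pointer(nameBytes)))
	if errno != 0 {
		return errno
	}
	return nil
}

func main() {
	_ = createBridge("docker0")
}
```
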
Lokesh Mandvekar
61fbf3d8e2 yum upgrade on fedora not required before install
Signed-off-by: Lokesh Mandvekar <lsm5@redhat.com>
2013-12-05 14:21:03 -06:00
Lokesh Mandvekar
f49eb29497 use mattdm/fedora in fedora doc and other cosmetic changes
Signed-off-by: Lokesh Mandvekar <lsm5@redhat.com>
2013-12-05 14:03:17 -06:00
Guillaume J. Charmes
1525f71b5a Merge pull request #3016 from pnasrat/3014-clarify-maintainer-process
Add a proposal step for potential MAINTAINERS.
2013-12-05 11:28:30 -08:00
Guillaume J. Charmes
4188cd6bcd Merge pull request #3024 from jamtur01/ubunturepo
Added note about Ubuntu curl installation
2013-12-05 11:27:46 -08:00
Andy Rothfusz
e304e8936b Merge pull request #3038 from SvenDowideit/2485-ambassador-pattern-doc
initial stab at writing a use/ambassador-pattern howto
2013-12-05 11:01:27 -08:00
James Turnbull
03f8a3bbae Fixed #2136 - Added styles
Added styling for versionadded, versionchanged, and
deprecated.
2013-12-05 13:46:15 -05:00
Guillaume J. Charmes
066b961a0c Merge pull request #3060 from dotcloud/sd_notify
sd_notify ready status when accepting API requests
2013-12-05 10:33:29 -08:00
Victor Vieux
f95f2789f2 add docs for the new json format 2013-12-05 10:12:22 -08:00
Victor Vieux
a8e99d9235 Merge pull request #2917 from codeaholics/2698-only-tag-top-layer
Only tag the top-most layer when pushing to a registry
2013-12-05 10:02:57 -08:00
Johan Euphrosine
5a17c208cd docs/installation/google: add enabling Google Compute Engine step 2013-12-05 09:43:08 -08:00
Sven Dowideit
e9bf971e69 use the Makefile in the dev environment documentation - it's way less typing, and fewer typing mistakes 2013-12-05 21:48:35 +10:00
Roberto Gandolfo Hashioka
909da5d524 - Added link to the source code repo on github 2013-12-04 21:08:44 -08:00
Sven Dowideit
04c32495f6 add a little prose to tell the user that run creates a container, and then starts it 2013-12-05 14:20:16 +10:00
Sven Dowideit
af020e2d67 I was reading the doc, and noticed that some examples didn't use $ sudo docker, so this makes it a little more consistent 2013-12-05 14:14:08 +10:00
Andy Rothfusz
94d46a8d3a Merge pull request #2991 from fortybillion/patch-1
API documentation: added format of Volumes and Binds parameters to "create" and "start"
2013-12-04 18:32:19 -08:00
Andy Rothfusz
ec9f2f1d0f Merge branch '2971-cleanconflicts' 2013-12-04 18:26:45 -08:00
Andy Rothfusz
0bfa22124e Merge branch 'pgdoc' of github.com:jamtur01/docker into 2971-cleanconflicts
Conflicts:
	docs/sources/examples/postgresql_service.rst
2013-12-04 18:25:58 -08:00
Victor Vieux
79031c4f8c Merge pull request #2907 from goldmann/iptables-fix
Make sure the firewall rules are created even if the bridge interface is already created
2013-12-04 18:04:57 -08:00
Victor Vieux
5f55c1aee1 Merge pull request #2966 from andrewsmedina/govet2
fixed some `go vet` issues.
2013-12-04 17:57:10 -08:00
Solomon Hykes
b4e21ad1da Add Sven Dowideit to docs maintainers 2013-12-04 17:50:33 -08:00
Victor Vieux
97088ebef7 sd_notify ready status when accepting API requests 2013-12-04 17:50:20 -08:00
Solomon Hykes
c35cebaa06 Merge pull request #3031 from jamtur01/docsmaintain
Add James Turnbull to docs maintainers
2013-12-04 17:48:45 -08:00
Victor Vieux
41b5e87873 Merge pull request #2993 from jpoimboe/rhel6-version-check
remove kernel version warning on rhel 6.5
2013-12-04 17:46:20 -08:00
Andy Rothfusz
9dfc7bc36f Merge pull request #2834 from fj/topic/clarify-aws-instructions
Fix installing-on-AWS workflow description
2013-12-04 17:25:35 -08:00
Guillaume J. Charmes
afbea3f13f Merge pull request #3058 from dotcloud/add_stderr_hijack
fix docker run -a stderr
2013-12-04 17:16:56 -08:00
Andy Rothfusz
5dab47a475 Merge pull request #2886 from jamtur01/fedora_community
Updated Fedora docs
2013-12-04 17:12:00 -08:00
Victor Vieux
3ba279a370 fix docker run -a stderr 2013-12-04 17:08:14 -08:00
Guillaume J. Charmes
944c1f10ea Merge pull request #2967 from crosbymichael/return-host-config
Add hostconfig to container inspect
2013-12-04 16:50:42 -08:00
Michael Crosby
0d1506adb3 Move test cpu shares out of test start
Fixes #2107
2013-12-04 16:31:09 -08:00
Michael Crosby
3a8222dfa5 Merge pull request #3032 from tianon/makefile-tweaks
Tweak Makefile for consistency and Ctrl+C-ability
2013-12-04 16:04:47 -08:00
Tianon Gravi
00030ced4b Tweak Makefile for consistency and Ctrl+C-ability (also, -rm to keep it clean) 2013-12-04 16:56:01 -07:00
Andy Rothfusz
f95621fd05 Merge pull request #3033 from proppy/add-compute-docs
Add Google Cloud Platform installation docs
2013-12-04 15:05:07 -08:00
Guillaume J. Charmes
4328926acc Merge pull request #3020 from crosbymichael/remove-init-layer
Ensure that the init layer is removed with the container
2013-12-04 14:46:48 -08:00
Johan Euphrosine
3df5d120de docs/installation: add google compute engine quickstart 2013-12-04 12:25:22 -08:00
Victor Vieux
1b5517b68f Merge pull request #3046 from dotcloud/fix_offline_image_transfert_jsonmessage
Fix offline image transfer jsonmessage
2013-12-04 12:05:28 -08:00
Victor Vieux
4bc100b494 fix jsonmessage 2013-12-04 11:57:18 -08:00
Victor Vieux
be282b57d5 fix offline image transfer 2013-12-04 11:55:42 -08:00
Victor Vieux
12180948be remove unused parameter in Download 2013-12-04 11:54:11 -08:00
Victor Vieux
6cf2c14c00 Merge pull request #2796 from EmilHernvall/master
Support for same port on multiple interfaces
2013-12-04 11:15:51 -08:00
Michael Crosby
dc9f8bf072 Merge pull request #2790 from proppy/fix-tar
utils: remove dotcloud/tar dep
2013-12-04 11:06:13 -08:00
Michael Crosby
0862756beb Merge pull request #3018 from pnasrat/3017-debug-expvar
Expose expvar endpoint during debugging.
2013-12-04 10:53:35 -08:00
Andy Rothfusz
de60bee3d4 Merge pull request #2946 from dhrp/doc-postgres-typo
Fixed some grammar and one other line in postgres example
2013-12-04 10:51:02 -08:00
Michael Crosby
51b9fe7301 Merge pull request #3000 from creack/improve_make_sh_test
Improve make sh test
2013-12-04 10:41:55 -08:00
Michael Crosby
61aad8fc10 Merge pull request #3029 from pnasrat/container-refactoring
Container refactoring
2013-12-04 10:35:42 -08:00
Sven Dowideit
6ddea783ef initial stab at writing a use/ambassador-pattern howto (Issue #2485) 2013-12-04 16:47:45 +10:00
Roberto Gandolfo Hashioka
58c33360b0 - Updated the doc with the current implementation status 2013-12-03 16:39:23 -08:00
Paul Nasrat
40fe9f581b Extract volume bind, creation and external methods.
Make Start() slightly more readable.
2013-12-03 18:58:31 -05:00
Sam Alba
258d707548 Merge pull request #2339 from shin-/private_reg_auth
Private registry auth
2013-12-03 15:29:38 -08:00
Victor Vieux
99e4f56353 Merge pull request #2930 from creack/docker-osx
Docker client on OSX
2013-12-03 15:27:21 -08:00
James Turnbull
0132547a38 Added myself to docs MAINTAINERS 2013-12-03 17:51:34 -05:00
Paul Nasrat
84f78d9cad Extract helper method for volume linking.
Makes this more readable.
2013-12-03 17:35:54 -05:00
Victor Vieux
f8176de191 Merge pull request #2999 from dotcloud/improve_progress_bars
Handle small screens
2013-12-03 14:24:40 -08:00
Victor Vieux
f50fe14e13 Handle small screens 2013-12-03 14:21:33 -08:00
James Turnbull
45567f2209 Updated Fedora docs
* Added Fedora installation instructions
2013-12-03 16:55:55 -05:00
Victor Vieux
2fd76fc0b8 Merge pull request #3009 from silas/stream-logs
Add stream flag to logs command
2013-12-03 13:43:35 -08:00
Silas Sewell
b699aee91f Rename logs -stream to logs -f 2013-12-03 20:35:22 +00:00
James Turnbull
64439505c7 Added note about Ubuntu curl installation 2013-12-03 14:37:01 -05:00
Michael Crosby
664174c7aa Add docs for hostconfig in inspect 2013-12-03 11:09:10 -08:00
Michael Crosby
7428e6a5f0 Merge pull request #3022 from pnasrat/3021-makefile-testflags
Support TESTFLAGS
2013-12-03 10:55:22 -08:00
Paul Nasrat
d21563ced3 Support TESTFLAGS 2013-12-03 14:49:10 -05:00
Paul Nasrat
6a55169e2e Expose expvar endpoint during debugging.
Fixes #3017
2013-12-03 13:04:18 -05:00
Michael Crosby
5976c26c1e Ensure that the init layer is removed with the container 2013-12-03 09:44:48 -08:00
Paul Nasrat
b59dea6767 Add a proposal step for potential MAINTAINERS.
Fixes #3014
2013-12-03 11:04:53 -05:00
shin-
9be5db8704 Handle 401 response in auth.Login() for authed private registries 2013-12-03 16:32:13 +01:00
shin-
3f92163989 Don't return req as result of setTokenAuth 2013-12-03 16:32:13 +01:00
shin-
3b5010e90b missed one call to setTokenAuth 2013-12-03 16:32:13 +01:00
shin-
ec4863ae55 Factorized auth token setting 2013-12-03 16:32:13 +01:00
shin-
a02bc8a5db gofmt 2013-12-03 16:32:13 +01:00
shin-
045989e3d8 Use basic auth for private registries when over HTTPS.
RequestFactory is no longer a singleton (can be different for different instances of Registry)
Registry now has an indexEndpoint member
Registry methods that needed the indexEndpoint parameter no longer do so
Registry methods will only use token auth where applicable if basic auth is not enabled.
2013-12-03 16:32:13 +01:00
shin-
bbf9135adc Added HTTPAuthDecorator 2013-12-03 16:24:47 +01:00
Emil Hernvall
1cb1e08644 Support for same port on multiple interfaces
This commit improves upon the PortMapper and PortAllocator classes by changing
their internal data structures for port allocations to use a string rather than
a single integer. This string holds the network interface address as well as the
port number. This solves a previous problem where a port would be incorrectly
reported as being in use because it had been allocated for a different interface.

I've also added a basic test case for the PortMapper class, and extended the
existing test case for PortAllocator. In the case of PortMapper, this is done
by handing it a stub function for creating proxies rather than an actual
implementation.
2013-12-03 15:14:54 +01:00
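
A toy version of that data-structure change (illustrative only; the real PortAllocator differs): keying allocations by interface address plus port, instead of by the bare port number.

```
package main

import "fmt"

type PortAllocator struct {
	inUse map[string]bool
}

// key combines interface address and port, so the same port can be
// allocated on different interfaces without a false "in use" report.
func key(ip string, port int) string {
	return fmt.Sprintf("%s:%d", ip, port)
}

func (a *PortAllocator) Acquire(ip string, port int) bool {
	k := key(ip, port)
	if a.inUse[k] {
		return false // genuinely in use on this interface
	}
	a.inUse[k] = true
	return true
}

func main() {
	a := &PortAllocator{inUse: map[string]bool{}}
	fmt.Println(a.Acquire("127.0.0.1", 80)) // true
	fmt.Println(a.Acquire("10.0.0.1", 80))  // true: same port, other interface
	fmt.Println(a.Acquire("10.0.0.1", 80))  // false: already taken here
}
```
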
Marek Goldmann
682cf48d1d Instructions on how to install Docker on Red Hat Enterprise Linux / CentOS. 2013-12-03 11:25:53 +01:00
Sven Dowideit
48e1766527 add an example of docker ps, and also of link aliases 2013-12-03 17:57:51 +10:00
Silas Sewell
3ddbb36a84 Only stream logs when container is running 2013-12-03 07:17:07 +00:00
Silas Sewell
62263967b9 Add stream flag to logs command 2013-12-03 06:18:01 +00:00
Michael Crosby
3ed0ff85f5 Merge pull request #2821 from jpellerin/2820-fix-daemon-dns
Fixes #2820
2013-12-02 18:19:31 -08:00
Michael Crosby
c4c90e9cec Add hostconfig to container inspect 2013-12-02 18:06:04 -08:00
Tianon Gravi
650d4cc644 Merge pull request #3005 from gurjeet/zfs_driver_owner
Typo fix
2013-12-02 15:06:05 -08:00
Gurjeet Singh
d9b742419c Typo fix 2013-12-02 18:03:54 -05:00
Tianon Gravi
c81bb20f5b Add cgroup-bin dependency to our Ubuntu package
Since cgroup-bin is only "recommended" by the lxc package on Ubuntu but is necessary for having the proper cgroups mounted for Docker to function, it makes sense for us to add it separately.

Fixes #2990
2013-12-02 16:02:54 -07:00
John Feminella
6c70d23e0d Fixes broken RST links; clarifies AWS instructions 2013-12-02 17:55:42 -05:00
John Feminella
c9432cf51a Reformats source text to proper widths 2013-12-02 17:55:42 -05:00
Guillaume J. Charmes
829b118dd8 Add some color in order to emphasize the test FAILURE 2013-12-02 14:54:01 -08:00
Tianon Gravi
3ac76cfeff Update bundlescript shebangs to be bash, reflecting how they're actually invoked 2013-12-02 15:48:39 -07:00
JP
5a9cf7e754 Add unit test for ValidateIp4Address 2013-12-02 17:33:33 -05:00
Josh Poimboeuf
e4aba11e80 add env variable to disable kernel version warning
Allow the user to set DOCKER_NOWARN_KERNEL_VERSION=1 to disable the
warning for RHEL 6.5 and other distributions that don't exhibit the
panics described in https://github.com/dotcloud/docker/issues/407.
2013-12-02 15:56:51 -06:00
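
Usage would look something like this (daemon invocation per the CLI of this era):

```
DOCKER_NOWARN_KERNEL_VERSION=1 docker -d
```
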
Tianon Gravi
9d62dc1a08 Remove mkimage-centos.sh in favor of a new rinse-based script, since febootstrap is fragile and picky 2013-12-02 14:54:04 -07:00
Guillaume J. Charmes
0017c68f4a Remove trailing whitespace 2013-12-02 13:52:27 -08:00
JP
3cd9b2aadf Fixes #2820 2013-12-02 16:22:39 -05:00
Victor Vieux
8afb0abbee Merge pull request #2998 from creack/fix_json_display
Fix json display
2013-12-02 12:58:58 -08:00
Guillaume J. Charmes
98ed1dc433 Fix unit test with new buildfile prototype 2013-12-02 12:51:37 -08:00
Victor Vieux
7aec93c370 Merge pull request #2959 from crosbymichael/fix-auth-split
Split auth on first colon
2013-12-02 12:21:04 -08:00
Victor Vieux
62f0e5aef9 Merge pull request #2965 from crosbymichael/no-private-port
Only return published ports for docker port
2013-12-02 12:18:47 -08:00
Victor Vieux
59a85798fa Merge pull request #2982 from tianon/dockerfile-best-practice
Update Dockerfile with all-caps INSTRUCTIONS
2013-12-02 12:14:04 -08:00
Victor Vieux
67c03552f6 Merge pull request #2957 from creack/handle_sigquit
Catch SIGQUIT for cleanup
2013-12-02 12:13:07 -08:00
Victor Vieux
4fdc117ad2 Merge pull request #2921 from dustin/inspect-format
Add -format to 'docker inspect'
2013-12-02 12:08:15 -08:00
Guillaume J. Charmes
5cd09dc115 small reformatting of jsonmessage 2013-12-02 11:49:11 -08:00
Guillaume J. Charmes
6ea3b9651b Fix displayJson behavior (don't add newline) 2013-12-02 11:47:13 -08:00
Guillaume J. Charmes
de4429f70d Do not format at each write but use a Writer instead (build) 2013-12-02 11:43:41 -08:00
Michael Crosby
8cc524996a Merge pull request #2924 from tianon/strict-fhs-compatibility
Add proper dockerinit path support for distros that use FHS 2.3
2013-12-02 11:35:36 -08:00
Tianon Gravi
d9fbdd7b3f Merge pull request #2995 from gurjeet/zfs_driver_owner
Add contrib/zfs/ and add self as maintainer of ZFS storage driver.
2013-12-02 11:10:16 -08:00
Dustin Sallings
4ad3dfb05f CLI docs and examples of format 2013-12-02 11:07:41 -08:00
Dustin Sallings
1d503be466 Use inspect format to get IP address for psql example 2013-12-02 11:07:41 -08:00
Dustin Sallings
9837ad8e9b Add -format to 'docker inspect'
This makes it a lot easier to script with docker instances, as one can
ask for details about running instances without having to install
additional JSON processing tools.

dotcloud/docker#734
2013-12-02 11:07:41 -08:00
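
For example, the psql change above uses the format flag along these lines (single-dash flags were the style at the time; the container name is a placeholder):

```
docker inspect -format '{{ .NetworkSettings.IPAddress }}' some-container
```
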
Gurjeet Singh
70b586702c Add contrib/zfs/ and add self as maintainer of ZFS storage driver. 2013-12-02 14:03:16 -05:00
Victor Vieux
4b35c1b6a6 Merge pull request #2728 from SvenDowideit/docker-import-doc
re-word the help for docker import to make it clear that this will be an empty image containing only the files in the tar file
2013-12-02 10:59:15 -08:00
Michael Crosby
fe571dd293 Merge pull request #2829 from dotcloud/refactor_opts
Refactor opts
2013-12-02 10:41:30 -08:00
Michael Crosby
e1414a4c39 Merge pull request #2945 from dotcloud/refactor_stream
Refactor stream
2013-12-02 10:31:09 -08:00
Victor Vieux
d4ebba703c Merge pull request #2994 from discordianfish/1150-build-return-exit-code
Bump api version and update docs
2013-12-02 10:30:46 -08:00
Johannes 'fish' Ziemke
e4cb83c50e Bump api version and update docs 2013-12-02 19:27:28 +01:00
Michael Crosby
d7dd19d22e Merge pull request #2981 from tianon/less-verbose-testing
Remove "-v" from "go test" (since it's easy to add back manually via TESTFLAGS)
2013-12-02 10:14:25 -08:00
Guillaume J. Charmes
5f2313aad3 Merge pull request #2984 from SvenDowideit/2319-build-check-for-dockerfile
check on the client side that there is a Dockerfile
2013-12-02 09:35:31 -08:00
Guillaume J. Charmes
d6cdbca6c1 Merge pull request #2775 from daniel-garcia/2671-bindmount_files
fixes #2671, add support for bind mounting individual files in to contai...
2013-12-02 09:30:08 -08:00
Guillaume J. Charmes
751250015b Merge pull request #2206 from discordianfish/1150-build-return-exit-code
Make docker build return exit code of build step
2013-12-02 09:01:11 -08:00
Johannes 'fish' Ziemke
b04c6466cd Make docker build return exit code of build step
If a command during build fails, `docker build` now returns with
the exit code of that command.

This makes it necessary to change the build api endpoint to
return a json object stream.
2013-12-02 17:52:37 +01:00
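
Illustratively, the build response then becomes a stream of one JSON object per line — a success chunk followed by an error chunk carrying the failing step's exit code (field names per the jsonmessage format of the time; values invented):

```
{"stream":"Step 1 : RUN false\n"}
{"error":"The command returned a non-zero code: 1","errorDetail":{"code":1,"message":"The command returned a non-zero code: 1"}}
```
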
Ben Sargent
b9ad0c9f74 Added format of Volumes and Binds parameters to create and start
* Added sample Volumes parameter to container create API.
* Added PortBindings and PublishAllPorts parameter to container start API
* Added note to container start about Binds needing to be declared as Volumes during container create.
2013-12-02 16:52:12 +00:00
Tianon Gravi
dbb47f63ab Add udev rules files for hiding the docker loopback devices from udisks
This prevents them from showing up on the desktop in a window manager, for example.
2013-12-02 09:11:06 -07:00
Solomon Hykes
c4548506c5 Merge pull request #2693 from fkautz/makefile
Adding a makefile
2013-12-01 23:25:24 -08:00
Sven Dowideit
26cf8b9aff check on the client side that there is a Dockerfile, so we don't upload a huge stack of files, only to realise we can't do anything 2013-12-02 15:55:08 +10:00
Tianon Gravi
7f1a91121c Update Dockerfile with all-caps INSTRUCTIONS (as explained in docs as being "convention": http://docs.docker.io/en/latest/use/builder/#format) 2013-12-01 21:31:28 -07:00
Tianon Gravi
c30e2dc28c Remove "-v" from "go test" (since it's easy to add back via TESTFLAGS) 2013-12-01 21:20:35 -07:00
Guillaume J. Charmes
d9cdd45d2e Merge pull request #2978 from tianon/go1.2
Update to Go 1.2 officially, now that it is released
2013-12-01 16:46:38 -08:00
Tianon Gravi
5c5f670901 Update to Go 1.2 officially, now that it is released 2013-12-01 15:11:10 -07:00
Andrews Medina
fea432bdf5 fixed 'Looback' typo. 2013-11-30 16:28:52 -02:00
James Turnbull
39aac21db4 Updated and simplified the PG example 2013-11-30 10:58:52 -05:00
Frederick F. Kautz IV
56ab9cb0d5 Minor fixes based on discussions on #2693
* Volume exports ./bundles instead of root directory
* Documents build using docker-docs instead of docker:docs
* Bundles directory is created before running build or docs
2013-11-30 00:09:41 -08:00
Solomon Hykes
d8ee08ba7b Merge pull request #2925 from tianon/update-systemd-service
Update systemd service file with suggestions from @seblu
2013-11-29 21:25:08 -08:00
Andrews Medina
e8437e8fcf using errors.New instead of fmt.Errorf. 2013-11-30 01:02:09 -02:00
Guillaume J. Charmes
4e030c78d2 Merge branch 'master' into handle_sigquit
Conflicts:
	server.go
2013-11-29 18:51:32 -08:00
Guillaume J. Charmes
62b1faf28c Merge pull request #2926 from crosbymichael/attach-wait
Return process exit code for attach
2013-11-29 18:49:24 -08:00
Guillaume J. Charmes
2dac7b5209 Merge pull request #2781 from shykes/engine-status-int
Engine: integer status, better streaming, lots of tests
2013-11-29 17:57:28 -08:00
Guillaume J. Charmes
1890301e67 Merge pull request #2964 from andrewsmedina/gofmt2
go fmt.
2013-11-29 17:19:28 -08:00
Andrews Medina
a6c9a332d0 fixed some go vet issues. 2013-11-29 22:53:20 -02:00
Michael Crosby
65db62619c Only return published ports for docker port 2013-11-29 16:45:20 -08:00
Solomon Hykes
35d54c6655 Fix a bug in Output.Write, and improve testing coverage of error cases. 2013-11-30 00:25:46 +00:00
Solomon Hykes
3553a803e3 Engine: better testing of streams and of basic engine primitives. Coverage=81.2% 2013-11-30 00:25:46 +00:00
Solomon Hykes
a4f8a2494b Engine: integer job status, improved stream API
* Jobs return an integer status instead of a string
* Status convention mimics unix process execution: 0=success, 1=generic error, 127="no such command"
* Stdout and Stderr support multiple thread-safe data receivers and ring buffer filtering
2013-11-30 00:25:46 +00:00
Andrews Medina
fe72f15e4a go fmt.
result of `gofmt -w -s .` without vendors.
2013-11-29 22:20:59 -02:00
Michael Crosby
a37b155384 Split auth on first colon 2013-11-29 15:14:36 -08:00
Guillaume J. Charmes
82cecb34b5 Merge pull request #2954 from creack/fix_tests
Fix tests
2013-11-29 15:08:11 -08:00
Guillaume J. Charmes
e1278e9ec2 Merge pull request #2952 from codeaholics/2126-registry-close-wait
Closing HTTP connection after Registry ping
2013-11-29 14:21:52 -08:00
Guillaume J. Charmes
db7c55ba7f Catch SIGQUIT for cleanup 2013-11-29 14:13:00 -08:00
Guillaume J. Charmes
0d3f4017cf Merge pull request #2929 from pnasrat/2928-fix-signal-handling
Remove incorrect SIGKILL handler.
2013-11-29 14:10:46 -08:00
Guillaume J. Charmes
ab35aef6b5 Add unit test to check bind / server side 2013-11-29 13:43:37 -08:00
Guillaume J. Charmes
bb284ce59d Merge branch 'master' into fix_tests 2013-11-29 13:17:36 -08:00
Guillaume J. Charmes
34353e782e Reduce the timeout for restart/stop 2013-11-29 11:08:01 -08:00
Michael Crosby
ca98434a45 Search for repo first before image id 2013-11-29 11:06:35 -08:00
Guillaume J. Charmes
86c00be180 Fix behavior of tty tests 2013-11-29 10:17:25 -08:00
Guillaume J. Charmes
2ec1146679 Remove a unit test from integration tests 2013-11-29 10:17:04 -08:00
Guillaume J. Charmes
2e6a958612 Fix TestAttachDetachTruncatedID (behavior + tty issue) 2013-11-29 10:03:36 -08:00
Guillaume J. Charmes
697be6aaa0 Create helper function for tests 2013-11-29 10:02:22 -08:00
Guillaume J. Charmes
c13821ad0b Make sure the termcaps are restored after hijack 2013-11-29 09:55:15 -08:00
Guillaume J. Charmes
aa68656cd3 Fix term.RestoreTerminal behavior 2013-11-29 09:52:44 -08:00
Guillaume J. Charmes
63d6cbe3e4 Actually test the detach (was not the case before) 2013-11-29 09:11:20 -08:00
Guillaume J. Charmes
67e9e0e11b Make the PTY in raw mode before assert test (TestAttachDetach) 2013-11-29 08:29:56 -08:00
Guillaume J. Charmes
fbebe20bc6 Add a GetPtyMaster() method to container to retrieve the pty from another package.
We could also have made ptyMaster public, but then we would need to ignore it in JSON,
otherwise the marshalling fails. I think it is cleaner this way.
2013-11-29 07:40:44 -08:00
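
The trade-off described there, sketched with an illustrative struct (not Docker's actual one): an unexported field is skipped by encoding/json automatically, with an accessor for other packages; the alternative is an exported field tagged `json:"-"`.

```
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type Container struct {
	ID        string
	ptyMaster *os.File // unexported: encoding/json skips it automatically
}

// GetPtyMaster exposes the pty to other packages without exporting the field.
func (c *Container) GetPtyMaster() *os.File { return c.ptyMaster }

// The alternative would be an exported field tagged to be ignored:
//   PtyMaster *os.File `json:"-"`

func main() {
	b, _ := json.Marshal(&Container{ID: "abc123"})
	fmt.Println(string(b)) // {"ID":"abc123"} — ptyMaster is never serialized
}
```
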
Guillaume J. Charmes
e535f544c7 Make sure the container is running before testing against it (TestAttachDetach) 2013-11-29 07:39:51 -08:00
cressie176
fe727e2a87 Closing connection after ping 2013-11-29 12:07:20 +00:00
Guillaume J. Charmes
f72e604872 Merge branch 'master' into docker-osx 2013-11-28 21:16:32 -08:00
Victor Vieux
926f7b579e Merge pull request #2692 from SvenDowideit/add-specific-feedback-for-bad-ADD
Add specific feedback for ADD outside context
2013-11-28 17:55:11 -08:00
Victor Vieux
ff5747728c Merge pull request #2357 from ulyssecarion/master
Document setting up Vagrant-docker with the remote API
2013-11-28 17:54:05 -08:00
Thatcher
6c56993639 Merge pull request #2901 from dkumor/patch-1
Deleted references to AUFS in Arch docs
2013-11-28 17:39:26 -08:00
Sven Dowideit
ba5268d382 re-word the help for docker import to make it clear that this will be an empty image containing only the files in the tar file 2013-11-29 11:22:17 +10:00
Thatcher Peskens
8291d509c2 Fixed some grammar and one other line about needing the postgresql-client for connecting to the service. 2013-11-28 17:19:26 -08:00
Thatcher
139644895e Merge pull request #2894 from brunoqc/patch-1
Update postgresql's version in example
2013-11-28 17:16:25 -08:00
Victor Vieux
cca9e51f5d Merge pull request #2941 from codeaholics/2940-invalid-registry-calls
Stop invalid calls to Registry
2013-11-28 17:00:22 -08:00
Victor Vieux
668d22be54 Merge pull request #2808 from tianon/run-all-tests
Update test scripts to always run ALL tests, even when some fail
2013-11-28 16:58:31 -08:00
Guillaume J. Charmes
77c94175bd Make CopyEscapable consistent with Copy and return nil in case of success instead of io.EOF 2013-11-28 16:57:51 -08:00
Victor Vieux
f94ea7769f Merge pull request #2923 from creack/refactor_cgo_in_go
Refactor attach loop device in pure Go
2013-11-28 16:52:38 -08:00
Victor Vieux
39bec226c0 Merge pull request #2844 from tianon/fix-mount-spaces
Add space-escaping to path parts of lxc.mount.entry lines in generated lxc.conf, allowing for spaces in mount point names
2013-11-28 16:48:36 -08:00
Victor Vieux
677e2ad92e Merge pull request #2881 from yrashk/patch-1
Fix command line help for docker save
2013-11-28 16:42:40 -08:00
Victor Vieux
d3cc558d14 add test 2013-11-28 16:28:31 -08:00
Guillaume J. Charmes
ad43d88af5 Make race condition more obvious by performing more asserts 2013-11-28 16:12:45 -08:00
Michael Crosby
1fe1b216ad Return process exit code for attach
Fixes #2240
2013-11-28 15:25:50 -08:00
Solomon Hykes
3faf450f11 Merge pull request #2818 from pnasrat/docker-pprof
Wire in pprof handlers.
2013-11-28 14:43:13 -08:00
Victor Vieux
b36dd3f9cc fix display on test 2013-11-28 14:40:17 -08:00
Thatcher
a0525d90ab Merge pull request #2937 from jwarwick/hello_world_doc
Remove explanation of removed argument
2013-11-28 14:35:17 -08:00
Victor Vieux
ebc36b879d add progressbar and time 2013-11-28 14:33:18 -08:00
Thatcher
14425c1690 Merge pull request #2922 from ath0mas/patch-1
Restore 'save' paragraph
2013-11-28 14:15:43 -08:00
Michael Crosby
aae23255a0 Merge pull request #2888 from tianon/changelog-fix
Fix CHANGELOG: we ended up not merging the btrfs driver for last night's release
2013-11-28 12:24:56 -08:00
Guillaume J. Charmes
2bbc90e92f Make volumes opts more strict 2013-11-28 12:24:04 -08:00
Guillaume J. Charmes
0c758e9312 Merge branch 'master' into refactor_opts
Conflicts:
	commands.go
2013-11-28 12:23:45 -08:00
Victor Vieux
597e0e69b4 split in 3 files 2013-11-28 12:16:57 -08:00
Guillaume J. Charmes
261bd0d187 Improve devmapper unit tests with syscall/ioctl 2013-11-28 11:53:09 -08:00
Michael Crosby
3d0486979e Merge pull request #2892 from creack/add_cli_test-1
Add ParseRun unit tests
2013-11-28 11:39:48 -08:00
Victor Vieux
377817db1b Merge pull request #2913 from pnasrat/2852-deleteimage-performance
Performance of deleteImageAndChildren.
2013-11-28 11:20:43 -08:00
Guillaume J. Charmes
a990b3aeb9 Correct comments 2013-11-28 11:02:53 -08:00
Paul Nasrat
9f46779d42 Wire in pprof handlers.
Based on http://stackoverflow.com/questions/19591065/profiling-go-web-application-built-with-gorillas-mux-with-net-http-pprof
2013-11-28 13:46:58 -05:00
Guillaume J. Charmes
533067bba4 Rename file for consistency 2013-11-28 10:37:03 -08:00
Tianon Gravi
438607ecc3 Add proper dockerinit path support for distros that use FHS 2.3 2013-11-28 11:11:30 -07:00
Danny Yates
d47507791e Stop invalid calls to Registry
This code was resulting in a call for
/v1/images/<namespace>/<repository>/ancestry which the Registry
doesn't understand. Furthermore, it was masking the original
error.
2013-11-28 16:43:28 +00:00
Paul Nasrat
bdfe8ed403 Remove incorrect SIGKILL handler.
As per POSIX signal handling SIGKILL does not work.

Fixes #2928
2013-11-28 10:42:06 -05:00
John Warwick
f1e44e0b0c Remove explanation of removed argument 2013-11-28 09:55:15 -05:00
Ulysse Carion
c226ab6d9e Document setting up Vagrant-docker with the remote API 2013-11-27 20:05:54 -08:00
Guillaume J. Charmes
74ea136a49 Move reflink to OS-dependent file. OSX docker client fully functional. 2013-11-27 19:23:48 -08:00
Guillaume J. Charmes
24c03b2d93 Make devicemapper linux-only 2013-11-27 19:12:51 -08:00
Guillaume J. Charmes
a58fef9f13 Merge branch 'master' into refactor_cgo_in_go 2013-11-27 18:55:11 -08:00
Victor Vieux
597ca192e7 Merge pull request #2927 from andrewsmedina/stat_macos
Move syscall.Stats logic to os specific file.
2013-11-27 18:32:18 -08:00
Andrews Medina
8b2a7e35c3 Move syscall.Stats logic to os specific file.
related to #2909.
2013-11-28 00:22:47 -02:00
Guillaume J. Charmes
8a5d927a53 Check if the target loopback is a block device 2013-11-27 18:21:17 -08:00
Guillaume J. Charmes
1214b8897b Extract ioctl from wrapper 2013-11-27 17:47:20 -08:00
Guillaume J. Charmes
eb528b959e Move attach loop device to its own file 2013-11-27 17:12:57 -08:00
Tianon Gravi
75e9cff98c Update systemd service file with suggestions from @seblu 2013-11-27 16:55:37 -07:00
Guillaume J. Charmes
74c8f7af75 Refactor attach loop device in pure Go 2013-11-27 15:39:30 -08:00
Alexis THOMAS
2c27da8818 Restore 'save' paragraph 2013-11-28 00:39:06 +01:00
Solomon Hykes
39f21af687 Merge pull request #2914 from crosbymichael/open-issues
Add instructions for opening issues on the repository
2013-11-27 15:25:12 -08:00
Danny Yates
d1a631cedb Only tag the top-most layer, not all interim layers 2013-11-27 22:00:58 +00:00
Michael Crosby
7f9cdaa342 Merge pull request #2831 from SvenDowideit/please-ignore-squiggle-backup-files
Please ignore squiggle backup files
2013-11-27 11:04:21 -08:00
Michael Crosby
e4ae44b844 Add instructions for opening issues on the repository 2013-11-27 10:41:20 -08:00
Guillaume J. Charmes
89454851d1 Merge pull request #2814 from dotcloud/release_checklist_update
update release checklist
2013-11-27 10:04:55 -08:00
Guillaume J. Charmes
f75dc36204 Merge pull request #2904 from SvenDowideit/dont-show-user-internal-slash-in-error-message
there appears to be a slash prepended to a container name internally
2013-11-27 09:58:24 -08:00
Michael Crosby
5fe5055bd9 Merge pull request #2910 from codeaholics/wait-for-simultaneous-pull
Allow multiple clients to pull the same tag simultaneously
2013-11-27 09:58:13 -08:00
Paul Nasrat
4e826e99b2 Performance of deleteImageAndChildren.
Don't walk the file system for parents each time we recurse.

Fixes #2852
2013-11-27 12:55:15 -05:00
Danny Yates
788feab3a7 Handle the case where poolAdd() gives an error for an unknown pool type 2013-11-27 16:53:36 +00:00
dkumor
682a188ead Arch docs: Added lxc-docker-nightly AUR package, modified deps
lxc-docker-nightly installs the latest build. Removed go from the dependencies, as it is not needed by lxc-docker and lxc-docker-nightly. The -git package will flag go as a dependency upon installation.
2013-11-27 10:25:30 -06:00
Bruno Bigras
45b1e8c236 Update postgresql's version in example
It seems ppa:pitti/postgresql will be deprecated and only apt.postgresql.org has 9.3.
2013-11-27 09:55:41 -05:00
Danny Yates
ae474e05f5 Allow multiple clients to pull the same tag simultaneously
If two clients simultaneously try to pull the same tag, there was a race
whereby one would succeed and the second would generate an error. Now,
the second simply waits for the first to complete.
2013-11-27 12:18:01 +00:00
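One plausible shape for that wait-instead-of-fail logic, as a sketch rather than the exact code from this change (pullPool, begin, and end are illustrative names):

package main

import "sync"

// pullPool tracks in-flight pulls keyed by image tag.
type pullPool struct {
	mu     sync.Mutex
	active map[string]chan struct{}
}

// begin returns (ch, true) when the caller should perform the pull itself,
// or (ch, false) when an identical pull is already running and the caller
// should block on ch until the first pull closes it.
func (p *pullPool) begin(tag string) (chan struct{}, bool) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if ch, ok := p.active[tag]; ok {
		return ch, false
	}
	ch := make(chan struct{})
	p.active[tag] = ch
	return ch, true
}

// end wakes every waiter for tag and clears the entry.
func (p *pullPool) end(tag string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if ch, ok := p.active[tag]; ok {
		close(ch)
		delete(p.active, tag)
	}
}

func main() {
	p := &pullPool{active: make(map[string]chan struct{})}
	if ch, first := p.begin("ubuntu:12.04"); first {
		// ... perform the pull, then release the waiters:
		p.end("ubuntu:12.04")
	} else {
		<-ch // a pull of the same tag is in progress; just wait for it
	}
}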
Marek Goldmann
0ff9bc1be3 Make sure the firewall rules are created even if the bridge interface is already created 2013-11-27 12:14:18 +01:00
dkumor
b3e8ba1908 Arch docs: Updated dependencies to match AUR
The AUR packages lxc-docker and lxc-docker-git have changed their dependencies.
2013-11-26 22:07:56 -06:00
Sven Dowideit
7b95d41092 tell anyone who might want to ignore their editor's backup files that there is a better way - thanks @pnasrat 2013-11-27 13:19:06 +10:00
Sven Dowideit
1cb7b9adc6 there appears to be a slash prepended to a container name internally - don't tell the user about it in an error message; it might cause them to worry about it 2013-11-27 12:58:54 +10:00
dkumor
d370a889c3 Deleted references to AUFS
AUFS is no longer a dependency (both lxc-docker and lxc-docker-git are >=0.7), and the Arch kernel doesn't need to be replaced with AUFS_friendly.
2013-11-26 20:00:13 -06:00
Michael Crosby
6d34c50e89 Increase max image depth to 127 2013-11-26 17:04:55 -08:00
Tianon Gravi
6344e6f258 Merge pull request #2887 from kleptog/master
Add mkseccomp.pl, helper script to make seccomp profiles.
2013-11-26 15:24:35 -08:00
Guillaume J. Charmes
462e30dcbd Add parseRun volume unit tests 2013-11-26 23:03:50 +00:00
Guillaume J. Charmes
c7661f40b6 Make volumes opts more strict 2013-11-26 23:00:44 +00:00
Guillaume J. Charmes
c707c587c1 Add ParseRun unit tests 2013-11-26 20:16:16 +00:00
Guillaume J. Charmes
5e3f6e7023 Change the default Host assignment so it does not rely on a slice 2013-11-26 18:31:59 +00:00
Guillaume J. Charmes
1beb5005d1 Format main() 2013-11-26 17:47:58 +00:00
Guillaume J. Charmes
1ba11384bf Refactor Opts 2013-11-26 17:46:06 +00:00
Tianon Gravi
8398abf0dc Fix CHANGELOG: we ended up not merging the btrfs driver for last night's release 2013-11-26 09:36:46 -07:00
Martijn van Oosterhout
ab3a83c617 Add mkseccomp.pl, helper script to make seccomp profiles. 2013-11-26 17:13:02 +01:00
Guillaume J. Charmes
8b99e4ed37 Merge pull request #2884 from nikai3d/patch-1
fix typo in CONTRIBUTING.md
2013-11-26 07:40:49 -08:00
Guillaume J. Charmes
17efa9dc2d Merge pull request #2872 from goldmann/runtime_typo
Fix the 'but is not' typo
2013-11-26 07:40:25 -08:00
Nicolas Kaiser
76c71260f1 fix typo in CONTRIBUTING.md 2013-11-26 16:13:39 +01:00
Yurii Rashkovskii
8267437294 Fix command line help for docker save
`docker save IMAGE DESTINATION` is not what `docker save` expects
2013-11-26 06:54:46 -08:00
Marek Goldmann
9c15322894 Fix the 'but is not' typo. 2013-11-26 11:18:50 +01:00
Victor Vieux
0d078b6581 fix -link parsing 2013-11-26 00:09:45 -08:00
Victor Vieux
06d5e25224 Merge pull request #2870 from dotcloud/fix-links-parsing
fix -link parsing
2013-11-26 00:09:09 -08:00
Victor Vieux
009024ad64 fix -link parsing 2013-11-26 00:05:46 -08:00
Victor Vieux
14d9f04e89 fix docker run on an unknown image 2013-11-25 23:28:48 -08:00
Victor Vieux
18d08d0d42 Merge pull request #2869 from dotcloud/fix-docker-pull-run
fix docker run on an unknown image
2013-11-25 23:28:15 -08:00
Victor Vieux
0bb2c0b1d0 fix docker run on an unknown image 2013-11-25 23:20:36 -08:00
Tianon Gravi
1af6ffb9bb Add explicit test strings for new escapeFstabSpaces function 2013-11-26 00:11:42 -07:00
Michael Crosby
233ad38802 Merge branch 'release' 2013-11-25 22:25:47 -08:00
Solomon Hykes
db28e839e0 Merge pull request #2857 from shykes/0.7-release
0.7 release
2013-11-25 22:14:33 -08:00
Solomon Hykes
de30ffb2c3 Add 0.7.0 Changelog entry, with notable high-level features since 0.6.0. 2013-11-26 06:06:22 +00:00
Solomon Hykes
5c5ee194cb Bump version file to 0.7.0 2013-11-26 06:06:22 +00:00
Solomon Hykes
b6dd67c707 Merge pull request #2868 from jamtur01/ubtfixes
Tidied up Ubuntu installation documentation
2013-11-25 22:05:36 -08:00
Michael Crosby
740958dda7 Merge pull request #2867 from shykes/more-random-names
Add more moods and inventor names to random name generator
2013-11-25 22:04:29 -08:00
Solomon Hykes
c38386d876 Remove non-ascii characters from name generator 2013-11-26 06:02:37 +00:00
Victor Vieux
4267fb66ef Merge pull request #2866 from tianon/binaries-docs
Update "Binaries" installation language to point to distro packages first
2013-11-25 22:00:20 -08:00
James Turnbull
a74b512540 Tidied up Ubuntu installation documentation 2013-11-26 00:58:17 -05:00
Solomon Hykes
60809a4f72 Add more moods to random name generator 2013-11-26 05:55:48 +00:00
Tianon Gravi
65fcc81b42 Update "Binaries" installation language to point to distro packages first
Also, added a minor consistency update to our usage of "wget".
2013-11-25 22:53:50 -07:00
Michael Crosby
06cf8fee1b Merge pull request #2865 from tianon/arch-docs
A couple tiny docs consistency fixes
2013-11-25 21:53:16 -08:00
Tianon Gravi
c92dab0eb4 Update Fedora placeholder to include the same "community" and "pre-1.0" warnings so we don't forget them 2013-11-25 22:48:51 -07:00
Tianon Gravi
6ad5b2bcf4 Update Arch Linux instructions to include warnings at the top like other "community" platforms 2013-11-25 22:47:15 -07:00
Tianon Gravi
77f1362c64 Merge pull request #2864 from tianon/gentoo-docs
Update the Gentoo installation docs for 0.7+
2013-11-25 21:42:14 -08:00
Tianon Gravi
4049359bee Update the Gentoo installation docs for 0.7+ 2013-11-25 22:40:24 -07:00
Michael Crosby
7daefc9d3f Merge pull request #2862 from shykes/integration-tests-vfs
Improve integration tests with vfs driver
2013-11-25 21:37:35 -08:00
Victor Vieux
d4c32b9015 Merge pull request #2854 from dotcloud/copy_dockerinit
create a copy of dockerinit on new runtime to handle upgrades
2013-11-25 21:31:33 -08:00
Solomon Hykes
8bd6127ab3 Merge pull request #2815 from tianon/hack-make-tgz
Add tgz bundlescript
2013-11-25 21:29:14 -08:00
Solomon Hykes
2302293244 Fix a race condition in the integration tests 2013-11-26 05:28:00 +00:00
Tianon Gravi
fd7ff6411d Merge pull request #2863 from jamtur01/instsort
Installation docs update
2013-11-25 21:19:30 -08:00
James Turnbull
59f76bf1c7 Installation docs update
* Updated TOC sort order
* Fixed some grammar in Rackspace doc
* Updated titles to remove Linux where not needed
2013-11-26 00:16:30 -05:00
Solomon Hykes
02cb7f45fa Fix a race condition in TestInterruptedRegister 2013-11-26 05:05:15 +00:00
Solomon Hykes
a937313747 Always use the 'vfs' storage driver in integration tests. To test other drivers, we need a dedicated driver validation suite. 2013-11-26 05:05:15 +00:00
Solomon Hykes
fb3d60f27a Move all graph tests into integration, because they now rely on the underlying graph driver, which currently cannot be mocked. 2013-11-26 05:05:15 +00:00
Tianon Gravi
5ff74e268d Merge pull request #2860 from jamtur01/fedora2
Updated Fedora docs with simple placeholder
2013-11-25 20:59:15 -08:00
Solomon Hykes
09b7b55e2c Merge pull request #2861 from crosbymichael/fix-time-race-archive
Set consistent time for sample dir in archive test
2013-11-25 20:24:16 -08:00
Victor Vieux
110c4f2043 create a copy of dockerinit 2013-11-25 20:21:54 -08:00
Michael Crosby
0d1b5d7676 Set consistent time for sample dir in archive test 2013-11-25 19:58:14 -08:00
James Turnbull
5242a49f3f Updated Fedora docs
* Added warning to Fedora docs
2013-11-25 22:51:38 -05:00
Michael Crosby
2586c042ae Merge pull request #2810 from dotcloud/use_utc_time
Use UTC for time
2013-11-25 19:11:59 -08:00
Solomon Hykes
688e86c625 Merge pull request #2858 from jamtur01/fedora
Added Fedora skeleton
2013-11-25 19:10:50 -08:00
James Turnbull
750d2d8d07 Added Fedora skeleton 2013-11-25 22:06:18 -05:00
Guillaume J. Charmes
19df6c32c0 Merge branch 'master' into use_utc_time
Conflicts:
	image.go
2013-11-25 19:01:13 -08:00
Solomon Hykes
1d903da6fd Merge pull request #2609 from shykes/0.6.5-dm-plugin
Move aufs to a storage driver, add devicemapper and dummy drivers
2013-11-25 18:58:26 -08:00
Solomon Hykes
aaefb8c07c Docs: update install pages (ubuntu and kernel requirements) to reflect the optional nature of AUFS 2013-11-26 02:45:32 +00:00
Guillaume J. Charmes
b3959e69b5 Merge pull request #2837 from shykes/0.7-names
New collection of random names for 0.7
2013-11-25 18:39:36 -08:00
Solomon Hykes
43c7df946d Merge branch 'master' into 0.6.5-dm-plugin 2013-11-26 02:00:25 +00:00
Solomon Hykes
6acdf68ee1 Merge pull request #2804 from tianon/fix-hack-git-assumption
Hack: add support for compiling using make.sh without '.git'
2013-11-25 16:52:22 -08:00
Michael Crosby
487b3d8a8c Merge pull request #72 from shykes/creack-reduce-debug
Reduce debugf frequency to avoid terminal freeze
2013-11-25 15:50:07 -08:00
Andy Rothfusz
33f70f8978 Merge pull request #2725 from SvenDowideit/docker-export-doc
add 'to STDOUT' to the help and give an example
2013-11-25 14:42:57 -08:00
Andy Rothfusz
809239c0af Merge pull request #2853 from estenberg/cfe-docker-process
docker in-container process management with CFEngine
2013-11-25 14:31:52 -08:00
Eystein Måløy Stenberg
937f52aef9 docker in-container process management with CFEngine 2013-11-25 14:24:23 -08:00
Andy Rothfusz
aa48acc5ec Merge pull request #2849 from jamtur01/supervisor
Added a Supervisor process management example
2013-11-25 13:50:44 -08:00
Andy Rothfusz
ac70e296db Merge pull request #2836 from fj/topic/spinx⇒sphinx
Corrects misspellings of 'Sphinx'
2013-11-25 13:40:26 -08:00
Andy Rothfusz
e2c3860ec3 Merge pull request #2835 from fj/topic/update-native-installation-instructions
Use requirements.txt rather than manual installation for docs
2013-11-25 13:28:18 -08:00
Andy Rothfusz
2d715bf3c0 Merge pull request #2833 from twillouer/patch-1
Better copy/paste :)
2013-11-25 13:21:50 -08:00
Victor Vieux
d9e54e28e7 Merge pull request #2848 from pnasrat/713-data-races
Fix data race in TestLogEvent
2013-11-25 12:07:01 -08:00
Guillaume J. Charmes
78d2e2dc37 Reduce debugf frequency to avoid terminal freeze 2013-11-25 12:06:16 -08:00
Paul Nasrat
abfdaca3f8 Fix data race in TestLogEvent
Found with -race. Improve locking on Server.
2013-11-25 14:17:58 -05:00
Guillaume J. Charmes
3a2fbcfdec Merge pull request #68 from crosbymichael/daemon-docs
Add daemon docs with selecting graph driver
2013-11-25 11:06:28 -08:00
James Turnbull
ba2b36e192 Added a Supervisor process management example 2013-11-25 14:05:01 -05:00
Guillaume J. Charmes
d47d49a2f9 Merge pull request #71 from crosbymichael/fix-dummy-import
Rename dummy in driver.go to vfs
2013-11-25 10:30:20 -08:00
Michael Crosby
8b0b10b6f9 Rename dummy in driver.go to vfs 2013-11-25 10:28:17 -08:00
Guillaume J. Charmes
399c71de83 Merge pull request #67 from crosbymichael/driver-flag
Change graph-driver flag to be s
2013-11-25 09:45:31 -08:00
Michael Crosby
d8f4b733f2 Add daemon docs with selecting graph driver 2013-11-25 09:44:55 -08:00
Michael Crosby
b4eeb6be61 Change graph-driver flag to be s 2013-11-25 09:43:32 -08:00
Guillaume J. Charmes
41704d8933 Merge pull request #69 from crosbymichael/rename-dummy
Rename dummy driver to vfs
2013-11-25 09:42:50 -08:00
Michael Crosby
64dd4afed6 Merge pull request #70 from codeaholics/use-https-url-in-dockerfile
Clone LVM using https: instead of git:
2013-11-25 09:17:35 -08:00
Andy Rothfusz
5da1ed3291 Merge pull request #2730 from SvenDowideit/docker-insert-doc
Be more explicit about what insert does.
2013-11-25 09:16:08 -08:00
Danny Yates
ad23745456 Clone LVM using https: instead of git:
The ports for the git protocol are not open in all corporate environments
2013-11-25 17:12:18 +00:00
Michael Crosby
cee0a292d0 Rename dummy driver to vfs 2013-11-25 09:04:04 -08:00
Tianon Gravi
b702edadb7 Format lxc_template.go with gofmt 2013-11-24 20:02:06 -07:00
Tianon Gravi
f16c45f8b0 Add space-escaping to path parts of lxc.mount.entry lines in generated lxc.conf, allowing for spaces in mount point names
Fixes #2802
2013-11-24 20:00:39 -07:00
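The escaping itself is small: lxc.conf mount entries follow fstab(5) conventions, where fields are whitespace-separated, so a literal space in a path has to become the octal escape \040. A sketch consistent with the escapeFstabSpaces helper named elsewhere in this log:

package main

import (
	"fmt"
	"strings"
)

// escapeFstabSpaces rewrites literal spaces as the fstab octal escape
// \040 so a path like "/mnt/my data" survives whitespace-separated
// lxc.mount.entry fields.
func escapeFstabSpaces(path string) string {
	return strings.Replace(path, " ", "\\040", -1)
}

func main() {
	fmt.Println(escapeFstabSpaces("/mnt/my data")) // /mnt/my\040data
}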
Solomon Hykes
07180f3aa7 Add unit test for awesomeness of generated container names 2013-11-25 00:25:15 +00:00
Solomon Hykes
a606474825 Add Ada Lovelace to the names generator (thanks James Turnbull) 2013-11-25 00:22:06 +00:00
Solomon Hykes
5d6ef3177b New collection of random names for 0.7: mood + famous inventor. Eg. 'sad-tesla' or 'naughty-turing' 2013-11-25 00:22:06 +00:00
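The scheme is simple enough to sketch in a few lines (word lists abbreviated here; the real generator ships far longer lists):

package main

import (
	"fmt"
	"math/rand"
)

var (
	moods     = []string{"sad", "naughty", "focused", "grumpy"}
	inventors = []string{"tesla", "turing", "lovelace", "curie"}
)

// randomName pairs a mood with a famous inventor, e.g. "sad-tesla".
func randomName() string {
	return fmt.Sprintf("%s-%s",
		moods[rand.Intn(len(moods))],
		inventors[rand.Intn(len(inventors))])
}

func main() {
	fmt.Println(randomName())
}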
William Delanoue
0a89db04fe Better copy/paste :) 2013-11-24 13:55:53 +01:00
John Feminella
1cce9f25b2 Corrects misspellings of 'Sphinx' 2013-11-23 16:21:27 -05:00
John Feminella
f1b3e278b9 Suggest users install dependencies by using requirements.txt, not manually 2013-11-23 15:53:50 -05:00
Sven Dowideit
e288e7763e be very explicit about docker insert - it does not insert a file into an IMAGE; it creates a new image that adds only that file to its specified parent 2013-11-23 12:48:34 +10:00
Sven Dowideit
9696ec509a add 'to STDOUT' to the help, and add a simple eg that mentions it too 2013-11-23 12:28:50 +10:00
Sven Dowideit
96b5be9dd9 add more searchable info to the error message when ADD tries to go outside the context 2013-11-23 12:23:31 +10:00
Guillaume J. Charmes
ba6dd1d8d6 Merge pull request #2827 from dotcloud/2778-fix-shell-corrupt
2778 fix shell corrupt
2013-11-22 15:46:02 -08:00
Guillaume J. Charmes
c67f9b671d Remove useless New*Opt functions, singleline Opt types 2013-11-22 14:42:30 -08:00
Guillaume J. Charmes
1c8ae47770 Make a validation on links name 2013-11-22 14:33:25 -08:00
Guillaume J. Charmes
d55998be81 Remove goroutine leak. Make sure termcap settings are reset each time. 2013-11-22 14:09:37 -08:00
Michael Crosby
e69bbd239e Merge branch 'master' into 0.6.5-dm-plugin 2013-11-22 14:06:59 -08:00
Victor Vieux
a26f9183bd Merge pull request #56 from shykes/creack-dm-plugin
Use variable to call functions in devmapper_wrapper + some formatting
2013-11-22 14:05:59 -08:00
Victor Vieux
944a48ec5a Merge pull request #2822 from dotcloud/better_port_help
Better port help
2013-11-22 13:59:35 -08:00
Guillaume J. Charmes
79e2b33ede Use variable to call functions in devmapper_wrapper + some formatting 2013-11-22 12:25:37 -08:00
Guillaume J. Charmes
076c0eab70 Format CmdRun 2013-11-22 12:14:34 -08:00
Guillaume J. Charmes
1f9223a7c2 Use a constant for PortSpecTemplate + display the template in the CmdRun help 2013-11-22 12:00:34 -08:00
Guillaume J. Charmes
476559458d Reformatting parseRun and partParse 2013-11-22 11:58:02 -08:00
Michael Crosby
d4c8fb9ee2 Merge pull request #2769 from vbatts/none_bridge
Do not set up bridge ip if 'none' bridge is provided
2013-11-22 11:48:33 -08:00
Michael Crosby
ae8c589d35 Merge pull request #2823 from dotcloud/2817-run_rm-fix
Fix docker run -rm
2013-11-22 11:35:29 -08:00
Guillaume J. Charmes
6130f2531e Merge pull request #2677 from mrallen1/fix/2627
Check for a terminal before using escapes
2013-11-22 11:30:16 -08:00
Vincent Batts
ef14aaf627 fix the nil pointer panic on closing a disabled network manager
Issue #2768
2013-11-22 14:28:49 -05:00
Victor Vieux
1e7c04fcfe fix -rm 2013-11-22 11:23:48 -08:00
Tianon Gravi
37e0083169 Add a few more small RELEASE-CHECKLIST tweaks, fixes, and improvements 2013-11-22 11:46:15 -07:00
Mark Allen
8b0cd60019 Pass terminal setting to display utils 2013-11-22 00:05:55 -06:00
daniel-garcia
0198f8a879 fixes #2671, add support for bind mounting individual files into containers, rebases of #1757 #2301 2013-11-21 23:40:35 -06:00
Tianon Gravi
b3f5973f41 Add new tgz bundlescript to default make and release 2013-11-21 22:34:54 -07:00
Tianon Gravi
3314e005f3 Add new bundlescript to build a tgz 2013-11-21 22:34:54 -07:00
Victor Vieux
a93e40a158 Merge pull request #2812 from dotcloud/bump_v0.6.7
Bump v0.6.7
2013-11-21 18:39:15 -08:00
Victor Vieux
58f8503b73 update release checklist 2013-11-21 18:34:23 -08:00
Victor Vieux
cb48ecc9dc Merge pull request #2812 from dotcloud/bump_v0.6.7
Bump v0.6.7
2013-11-21 18:21:19 -08:00
Victor Vieux
53f1bf0f99 Bump version to v0.6.7 2013-11-21 18:03:41 -08:00
Victor Vieux
9dc59797e0 Merge branch 'master' into bump_v0.6.7 2013-11-21 17:40:29 -08:00
Michael Crosby
0cecc2a78c Merge branch 'master' into 0.6.5-dm-plugin
Conflicts:
	container.go
	image.go
2013-11-21 17:18:41 -08:00
Michael Crosby
437bdeee59 Merge pull request #62 from shykes/devmapper-unit-tests
Devmapper unit tests
2013-11-21 16:54:50 -08:00
Guillaume J. Charmes
806abe90ba Use UTC for time 2013-11-21 16:43:36 -08:00
Michael Crosby
25e443a3c7 Merge pull request #2798 from dotcloud/fix_state_race
Refactor State to be 100% thread safe
2013-11-21 16:39:50 -08:00
Guillaume J. Charmes
33e70864a2 Refactor State to be 100% thread safe 2013-11-21 16:34:58 -08:00
Guillaume J. Charmes
bc82940a57 Forbid syscalls in tests, add 2 new unit tests 2013-11-21 16:32:16 -08:00
Michael Crosby
d6e6214d37 Merge pull request #2678 from cxmcc/minor
Minor code simplification for Containers api
2013-11-21 16:27:15 -08:00
Michael Crosby
70f1bd3104 Merge pull request #2488 from viirya/fix_container_volumes_delete
Skip the volumes mounted when deleting the volumes of container.
2013-11-21 16:20:22 -08:00
Michael Crosby
f7c2a00557 Merge pull request #2304 from unclejack/fix_layer_size_computation
Fix layer size computation: handle hard links correctly
2013-11-21 16:10:24 -08:00
Victor Vieux
8498b44eac Merge pull request #2731 from SvenDowideit/docker-commit-doc
make the docker commit help more copy&pasteable
2013-11-21 15:33:05 -08:00
Tianon Gravi
f1e6dce047 Update test scripts to always run ALL tests, even when some fail 2013-11-21 16:19:19 -07:00
Andy Rothfusz
e2dcfc2cf7 Merge pull request #2772 from bitoiu/patch-3
Change to documentation for AWS AMI request
2013-11-21 15:15:04 -08:00
Andy Rothfusz
9b4c151142 Merge pull request #2717 from metalivedev/2342-uploadingcontext
Fix #2342. Harmonize information about ADD. Cross-link build info.
2013-11-21 15:09:47 -08:00
Andy Rothfusz
50239e0573 Merge pull request #2799 from rosenhouse/patch-1
Fix title on doc page for remote_api_client_libraries.rst
2013-11-21 14:40:33 -08:00
Guillaume J. Charmes
42c23b0f04 Merge pull request #2763 from dotcloud/use_full_id_delete
Do not truncate ID on docker rmi
2013-11-21 14:39:38 -08:00
Guillaume J. Charmes
eec91e7941 Merge pull request #2805 from pmorie/typo
Fix typo in pullImage
2013-11-21 14:35:07 -08:00
Paul Morie
3f17844b6e Fix typo in pullImage 2013-11-21 17:29:03 -05:00
Tianon Gravi
efd0e13ca7 Add support for compiling using make.sh without '.git' (ie, from a Github tarball, for example) 2013-11-21 15:11:17 -07:00
Guillaume J. Charmes
bcdeb37bb6 Merge pull request #2794 from dotcloud/fix_status_code_and_usage
fix status code and usage
2013-11-21 13:48:38 -08:00
Gabe Rosenhouse
362e9d6b3c Fix title on doc page for remote_api_client_libraries.rst 2013-11-21 12:45:01 -08:00
Guillaume J. Charmes
c4ab498920 Merge pull request #2797 from crosbymichael/revert-exit-lock
Revert "Lock state before we modify."
2013-11-21 12:19:23 -08:00
Michael Crosby
cb70eedfda Revert "Lock state before we modify."
This reverts commit d7e2fc8982.
2013-11-21 12:11:25 -08:00
Victor Vieux
75a7f4d90c Return exit status 2 on usage, move parserun into commands.go, display usage on stderr 2013-11-21 11:43:07 -08:00
Victor Vieux
da824b4a5a Merge pull request #63 from tianon/flag-help-consistency
Update a few flag help strings for consistency and clarity
2013-11-21 11:31:34 -08:00
Tianon Gravi
1ab6b8bf49 Update a few flag help strings for consistency and clarity 2013-11-21 12:30:17 -07:00
Victor Vieux
eaeb969138 Merge pull request #2795 from pnasrat/docker-testmultipleattachrestart-race
Lock state before we modify.
2013-11-21 11:28:02 -08:00
Michael Crosby
253214f07d Update ImageExport after merge fail 2013-11-21 10:26:21 -08:00
Michael Crosby
a2c9d2da93 Merge branch 'master' into 0.6.5-dm-plugin 2013-11-21 10:21:30 -08:00
Paul Nasrat
d7e2fc8982 Lock state before we modify.
When we start a container we lock state; we should do the same in stop.

Detected via -race.
2013-11-21 08:06:02 -05:00
Victor Vieux
f20c738963 Merge pull request #2793 from crosbymichael/offline-ids
Allow images to be saved and loaded by id and repository
2013-11-21 02:44:00 -08:00
Solomon Hykes
df258f5861 Devmapper: test driver initialization and its interaction with libdevmapper 2013-11-21 02:17:03 +00:00
Solomon Hykes
60f728b170 Devmapper: wrap calls to os/exec for easier mocking 2013-11-21 02:16:26 +00:00
Solomon Hykes
2b7c63b1b5 devmapper: skip tests which are not unit tests 2013-11-21 02:12:51 +00:00
Michael Crosby
fd7ab143bf Allow images to be saved and loaded by id and repository 2013-11-20 17:28:19 -08:00
Andy Rothfusz
82cdd21a34 Merge pull request #2727 from SvenDowideit/docker-images-doc
add some common examples for docker images, and tell the user what -a filters out
2013-11-20 16:30:48 -08:00
Michael Crosby
a9230af52e Merge pull request #57 from shykes/wait_on_pull_already
Wait on pull already in progress
2013-11-20 15:58:51 -08:00
Victor Vieux
2f0d18ac4a Merge pull request #59 from crosbymichael/fix-image-save-size
Fix image save size
2013-11-20 15:58:31 -08:00
Michael Crosby
6469422465 Merge pull request #58 from crosbymichael/update-aufs-tests
Add more aufs tests and implement Status
2013-11-20 15:41:59 -08:00
Michael Crosby
5306053e21 Add more aufs tests and implement Status 2013-11-20 15:41:37 -08:00
Solomon Hykes
e2390318bb Devmapper: mock all calls to libdevmapper in the unit tests, and deny them by default 2013-11-20 23:39:02 +00:00
Michael Crosby
4e0c76b321 Ensure that only the layers are compressed and not mnt points 2013-11-20 15:37:26 -08:00
Solomon Hykes
da514223d1 Devmapper: remove deprecated test helpers 2013-11-20 23:25:27 +00:00
Solomon Hykes
023ff36704 devmapper: fix typo 2013-11-20 23:12:19 +00:00
Michael Crosby
8fdbf46afb Fix image size calc on initial save 2013-11-20 14:51:04 -08:00
Guillaume J. Charmes
d233894c25 Add devmapper struct doc 2013-11-20 14:09:46 -08:00
Victor Vieux
8a756f417e wait on pull already in progress 2013-11-20 14:04:19 -08:00
Guillaume J. Charmes
a39bd65662 Remove os from devmapper 2013-11-20 13:05:17 -08:00
Guillaume J. Charmes
5690139785 Remove all syscall calls from devicemapper 2013-11-20 12:49:01 -08:00
Solomon Hykes
92f94f06ae Mock calls to system functions to facilitate unit testing 2013-11-20 20:05:10 +00:00
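A sketch of that mocking pattern with illustrative names rather than the exact identifiers from the tree: route the raw syscall through a package-level function variable so a unit test can swap it out without touching a real device (linux-only; BLKGETSIZE64 asks a block device for its size in bytes):

package main

import (
	"fmt"
	"syscall"
	"unsafe"
)

const blkGetSize64 = 0x80081272 // BLKGETSIZE64 on linux/amd64

// sysSyscall is a seam: production code calls the real syscall,
// tests replace it so no ioctl is ever issued.
var sysSyscall = syscall.Syscall

func blockDeviceSize(fd uintptr) (uint64, error) {
	var size uint64
	if _, _, errno := sysSyscall(syscall.SYS_IOCTL, fd, blkGetSize64,
		uintptr(unsafe.Pointer(&size))); errno != 0 {
		return 0, errno
	}
	return size, nil
}

func main() {
	// What a unit test would do: pretend the ioctl succeeded.
	sysSyscall = func(trap, a1, a2, a3 uintptr) (uintptr, uintptr, syscall.Errno) {
		return 0, 0, 0
	}
	fmt.Println(blockDeviceSize(0))
}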
Michael Crosby
2bc35287a0 Merge pull request #2723 from SvenDowideit/doc-build-image
Use the word Path for docker cp CONTAINER:PATH
2013-11-20 11:10:13 -08:00
Michael Crosby
2382a0f920 Merge branch 'master' into 0.6.5-dm-plugin
Conflicts:
	server.go
2013-11-20 11:07:42 -08:00
Guillaume J. Charmes
579a5c843b Merge pull request #51 from crosbymichael/driver-specific-image
Handle image metadata when drivers are switched
2013-11-20 10:45:51 -08:00
Johan Euphrosine
b3bee7e0c4 utils: remove dotcloud/tar dep 2013-11-20 18:45:12 +00:00
Guillaume J. Charmes
6ebb236aa1 Merge pull request #55 from alexlarsson/dm-plugins-cleanup-graph-test
graph_test: Clean up drivers allocated in tempGraph()
2013-11-20 10:41:18 -08:00
Guillaume J. Charmes
b4f7078a02 Merge pull request #2773 from dotcloud/speed_up_docker_ps
speedup docker ps
2013-11-20 10:40:03 -08:00
Guillaume J. Charmes
9e68913397 Merge pull request #2760 from dotcloud/improve_engine_test
improve tests on the engine
2013-11-20 10:38:46 -08:00
Michael Crosby
1b28cdc7f9 Handle image metadata when drivers are switched 2013-11-20 10:31:51 -08:00
Victor Vieux
304a80fcd5 Merge pull request #2747 from jpoimboe/test-fixes
Test fixes
2013-11-20 10:07:40 -08:00
Guillaume J. Charmes
04f1d4dcdb Merge pull request #2788 from jpoimboe/resize-after-start
resize pty after starting
2013-11-20 10:01:59 -08:00
Josh Poimboeuf
171d681724 resize pty after starting
Since ptyMaster is created during container start (startPty), it should be
resized after starting, not before.
2013-11-20 11:40:30 -06:00
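For reference, resizing a pty comes down to a TIOCSWINSZ ioctl on the master fd, which requires that master to exist first; hence the reordering. A linux-only sketch with illustrative names:

package main

import (
	"os"
	"syscall"
	"unsafe"
)

// winsize mirrors the kernel's struct winsize used by TIOCSWINSZ.
type winsize struct {
	rows, cols, xpixel, ypixel uint16
}

// resizePty sets the terminal size on an open pty master. Called before
// the pty exists, it has nothing to act on, which is the bug fixed here.
func resizePty(ptyMaster *os.File, h, w int) error {
	ws := winsize{rows: uint16(h), cols: uint16(w)}
	if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL,
		ptyMaster.Fd(), syscall.TIOCSWINSZ,
		uintptr(unsafe.Pointer(&ws))); errno != 0 {
		return errno
	}
	return nil
}

func main() {
	// resizePty would be called with the master returned by startPty,
	// immediately after the container has started.
}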
Vincent Batts
60cb5f1a34 do not set up bridge ip if bridgeNetwork is nil. This is the case when
-b='none' is provided.

issue #2768
https://bugzilla.redhat.com/show_bug.cgi?id=1032094
2013-11-20 09:36:38 -05:00
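A sketch of the guard; the type and field names are illustrative, patterned on the 'network manager' wording in the related panic fix above:

package main

type network struct{} // would hold the bridge's addresses and iptables rules

func (n *network) release() { /* tear down rules, free the address */ }

type networkManager struct {
	bridgeNetwork *network // stays nil when the daemon runs with -b none
}

func (m *networkManager) Close() {
	// With -b none there is no bridge network to configure or release,
	// so dereferencing bridgeNetwork unconditionally would panic.
	if m.bridgeNetwork != nil {
		m.bridgeNetwork.release()
	}
}

func main() {
	m := &networkManager{} // networking disabled
	m.Close()              // safe: the nil case is skipped
}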
Alexander Larsson
cfdc284abe tags test: cleanup driver
If not, we leak a devicemapper pool
2013-11-20 14:52:06 +01:00
Alexander Larsson
7192be47c5 graph_test: Clean up drivers allocated in tempGraph()
If we don't do this we leak devicemapper pools with the dm backend.
2013-11-20 14:42:32 +01:00
Michael Crosby
cd4c1ac356 Merge pull request #2779 from crosbymichael/pin-python-deps
Pin python deps to a specific version
2013-11-19 21:29:09 -08:00
Michael Crosby
b8af68a92b Merge pull request #1974 from dotcloud/1155-offline-image-transfer
Implement offline image transfers
2013-11-19 20:00:58 -08:00
Michael Crosby
9de4590498 Fix typo in remove all. Ensure tmpDir is removed 2013-11-20 03:52:33 +00:00
Michael Crosby
0ef6fed5c7 Fix error checks for offline transfer and remove unneeded debug output 2013-11-20 03:52:33 +00:00
Frederick F. Kautz IV
383f95bba1 Adding 1.7 documentation 2013-11-20 03:52:33 +00:00
Frederick F. Kautz IV
1211065c8d Adding content type to images/(name)/get 2013-11-20 03:52:33 +00:00
Frederick F. Kautz IV
844c13bce6 Fixing build 2013-11-20 03:52:33 +00:00
Frederick F. Kautz IV
6014db4a7e Setting offline package version to 1.0 2013-11-20 03:52:33 +00:00
Frederick F. Kautz IV
bf504f2afa Adding spaces to fix docs build 2013-11-20 03:52:33 +00:00
Frederick F. Kautz IV
61a8020e51 Adding version info, switching to defer for cleanup 2013-11-20 03:52:33 +00:00
Frederick F. Kautz IV
7eaa59f626 Offline Image Transfers #1155 2013-11-20 03:52:33 +00:00
Michael Crosby
2cccbbdadd Pin python deps to a specific version 2013-11-19 19:25:54 -08:00
Tianon Gravi
66beafa9f3 rename file to remove testing flags from docker binary 2013-11-19 12:41:29 -08:00
Victor Vieux
8e5ab5bfca improve tests on the engine 2013-11-19 11:47:47 -08:00
Andy Rothfusz
4f9f83d6c6 Fix #2342. Harmonize information about ADD. Cross-link build info. 2013-11-19 11:16:28 -08:00
Victor Vieux
145c2008ae speedup docker ps 2013-11-19 11:02:10 -08:00
Vitor Monteiro
aeb304b37c Change to documentation for AWS AMI request
Hi guys, it just might be me, but clicking the AMI from http://cloud-images.ubuntu.com/locator/ec2/ is broken for me. So I just did it via the normal Create Instance Wizard.

I just thought some people might have the same issue.

Sorry if my markdown for links is screwed up, I went by the examples, since I'm used to the `[]()` traditional one.

Cheers.
2013-11-19 17:50:38 +00:00
unclejack
78c843c8ef fix container size computation: handle hard links
This change makes docker compute container size correctly.

The old code wasn't taking hard links into account. Containers could
seem up to 1-1.5x larger than they really were.
2013-11-19 13:53:34 +02:00
unclejack
ac821f2446 fix layer size computation: handle hard links
This change makes docker compute layer size correctly.

The old code wasn't taking hard links into account. Layers could
seem up to 1-1.5x larger than they really were.
2013-11-19 13:37:54 +02:00
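The underlying issue: stat(2) reports the full size for every name of a hard-linked file, so a naive walk counts such files once per link. Counting each inode a single time fixes that. A sketch of the approach on linux, not the exact function from this change:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"syscall"
)

// sizeOnce sums the file sizes under root, counting each inode a single
// time so hard links do not inflate the total.
func sizeOnce(root string) (int64, error) {
	seen := make(map[uint64]struct{})
	var total int64
	err := filepath.Walk(root, func(_ string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if fi.IsDir() {
			return nil
		}
		if st, ok := fi.Sys().(*syscall.Stat_t); ok && st.Nlink > 1 {
			if _, dup := seen[st.Ino]; dup {
				return nil // another name for a file already counted
			}
			seen[st.Ino] = struct{}{}
		}
		total += fi.Size()
		return nil
	})
	return total, err
}

func main() {
	fmt.Println(sizeOnce("/var/lib/docker"))
}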
Victor Vieux
2fe4467d73 Do not truncate ID on docker rmi 2013-11-18 18:39:02 -08:00
Josh Poimboeuf
4b80ec9aae test: remove extra args in TestExitCode
The extra blank argument isn't needed and confuses libvirt.
2013-11-18 15:07:11 -06:00
Josh Poimboeuf
fef41ef7bf test: fix TestRmi race condition 2013-11-18 15:07:06 -06:00
Josh Poimboeuf
fe302fbfd2 test: 2 second timeout (not 2000) 2013-11-18 10:23:30 -06:00
Josh Poimboeuf
72d02ecdde test: skip TestCreate on Fedora due to lxc utils bug
In the dind environment running on a Fedora host, the lxc utils get
confused by the /sys/fs/cgroup/cpuacct,cpu cgroup mount and lxc-start
fails trying to access the wrong cgroup directory.
2013-11-18 10:23:30 -06:00
Josh Poimboeuf
baa687bed2 test: fix TestCreateStartRestartStopStartKillRm
cat needs stdin opened, otherwise it dies immediately.
2013-11-18 10:23:30 -06:00
Josh Poimboeuf
30ea0bebce test: put each arg in a separate string
Each arg to docker run should be placed in a separate string.
Otherwise, when starting the command via exec.Cmd, the command is
interpreted as "echo test", which can't be found.
2013-11-18 10:23:06 -06:00
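A minimal illustration of the difference; os/exec passes each string through as exactly one argv element, with no shell splitting:

package main

import "os/exec"

func main() {
	// Wrong: "echo test" travels as a single argv element, so the
	// runtime looks for a binary literally named "echo test".
	bad := exec.Command("docker", "run", "busybox", "echo test")

	// Right: every argument in its own string.
	good := exec.Command("docker", "run", "busybox", "echo", "test")

	_ = bad.Run()
	_ = good.Run()
}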
Sven Dowideit
bc74f65068 make the docker commit help more copy&pasteable 2013-11-16 21:11:34 +10:00
Sven Dowideit
152459b727 add some common examples for docker images, and tell the user what -a filters out 2013-11-16 20:45:30 +10:00
Sven Dowideit
27159ce6ba expunge the word 'Resource' in reference to a file/dir in a CONTAINER - that way users don't wonder how it's different from a Path 2013-11-16 20:15:04 +10:00
Frederick F. Kautz IV
7267c4b746 Removing sudo 2013-11-14 05:53:53 +00:00
Frederick F. Kautz IV
26533eb2c4 Adding a makefile 2013-11-14 05:34:25 +00:00
Xiuming Chen
0013aa7d9f Minor code simplification for Containers api 2013-11-13 01:29:00 -08:00
Liang-Chi Hsieh
1d7f22c0d4 use Binds key in hostConfig to detect volumes mounted from external sources. 2013-11-13 15:08:46 +08:00
Liang-Chi Hsieh
6a693176d6 skip the volumes mounted when deleting the volumes of a container. 2013-11-13 14:58:24 +08:00
Victor Vieux
6d420407ca Merge pull request #2577 from dotcloud/bump_v0.6.6
Bump v0.6.6
2013-11-06 12:03:03 -08:00
196 changed files with 12360 additions and 5608 deletions

.gitignore

@@ -1,3 +1,6 @@
# Docker project generated files to ignore
# if you want to ignore files created by your editor/tools,
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
.vagrant*
bin
docker/docker

CHANGELOG.md

@@ -1,5 +1,149 @@
# Changelog
## 0.7.2 (2013-12-16)
#### Runtime
+ Validate container names on creation with standard regex
* Increase maximum image depth to 127 from 42
* Continue to move api endpoints to the job api
+ Add -bip flag to allow specification of dynamic bridge IP via CIDR
- Allow bridge creation when ipv6 is not enabled on certain systems
* Set hostname and IP address from within dockerinit
* Drop capabilities from within dockerinit
- Fix volumes on host when a symlink is present in the image
- Prevent deletion of image if ANY container is depending on it even if the container is not running
* Update docker push to use new progress display
* Use os.Lstat to allow mounting unix sockets when inspecting volumes
- Adjusted handling of inactive user login
- Add missing defines in devicemapper for older kernels
- Allow untag operations with no container validation
- Add auth config to docker build
#### Documentation
* Add more information about Docker logging
+ Add RHEL documentation
* Add a direct example for changing the CMD that is run in a container
* Update Arch installation documentation
+ Add section on Trusted Builds
+ Add Network documentation page
#### Other
+ Add new cover bundle for providing code coverage reporting
* Separate integration tests in bundles
* Make Tianon the hack maintainer
* Update mkimage-debootstrap with more tweaks for keeping images small
* Use https to get the install script
* Remove vendored dotcloud/tar now that Go 1.2 has been released
## 0.7.1 (2013-12-05)
#### Documentation
+ Add @SvenDowideit as documentation maintainer
+ Add links example
+ Add documentation regarding ambassador pattern
+ Add Google Cloud Platform docs
+ Add dockerfile best practices
* Update doc for RHEL
* Update doc for registry
* Update Postgres examples
* Update doc for Ubuntu install
* Improve remote api doc
#### Runtime
+ Add hostconfig to docker inspect
+ Implement `docker logs -f` to stream logs
+ Add env variable to disable kernel version warning
+ Add -format to `docker inspect`
+ Support bind-mount for files
- Fix bridge creation on RHEL
- Fix image size calculation
- Make sure iptables are called even if the bridge already exists
- Fix issue with stderr only attach
- Remove init layer when destroying a container
- Fix same port binding on different interfaces
- `docker build` now returns the correct exit code
- Fix `docker port` to display correct port
- `docker build` now checks that the Dockerfile exists client side
- `docker attach` now returns the correct exit code
- Remove the name entry when the container does not exist
#### Registry
* Improve progress bars, add ETA for downloads
* Simultaneous pulls now wait for the first to finish instead of failing
- Tag only the top-layer image when pushing to registry
- Fix issue with offline image transfer
- Fix issue preventing using ':' in password for registry
#### Other
+ Add pprof handler for debug
+ Create a Makefile
* Use stdlib tar that now includes the fix
* Improve make.sh test script
* Handle SIGQUIT on the daemon
* Disable verbose during tests
* Upgrade to go1.2 for official build
* Improve unit tests
* The test suite now runs all tests even if one fails
* Refactor C in Go (Devmapper)
- Fix OSX compilation
## 0.7.0 (2013-11-25)
#### Notable features since 0.6.0
* Storage drivers: choose from aufs, device-mapper, or vfs.
* Standard Linux support: docker now runs on unmodified Linux kernels and all major distributions.
* Links: compose complex software stacks by connecting containers to each other.
* Container naming: organize your containers by giving them memorable names.
* Advanced port redirects: specify port redirects per interface, or keep sensitive ports private.
* Offline transfer: push and pull images to the filesystem without losing information.
* Quality: numerous bugfixes and small usability improvements. Significant increase in test coverage.
## 0.6.7 (2013-11-21)
#### Runtime
* Improved stability, fixes some race conditions
* Skip the volumes mounted when deleting the volumes of a container.
* Fix layer size computation: handle hard links correctly
* Use the word Path for docker cp CONTAINER:PATH
* Fix tmp dir never being cleaned up
* Speedup docker ps
* More informative error message on name collisions
* Fix nameserver regex
* Always return long IDs
* Fix container restart race condition
* Keep published ports on docker stop; docker start
* Fix container networking on Fedora
* Correctly express "any address" to iptables
* Fix network setup when reconnecting to ghost container
* Prevent deletion if image is used by a running container
* Lock around read operations in graph
#### RemoteAPI
* Return full ID on docker rmi
#### Client
+ Add -tree option to images
+ Offline image transfer
* Exit with status 2 on usage error and display usage on stderr
* Do not forward SIGCHLD to container
* Use string timestamp for docker events -since
#### Other
* Update to go 1.2rc5
+ Add /etc/default/docker support to upstart
## 0.6.6 (2013-11-06)
#### Runtime
@@ -17,6 +161,7 @@
+ Prevent DNS server conflicts in CreateBridgeIface
+ Validate bind mounts on the server side
+ Use parent image config in docker build
* Fix regression in /etc/hosts
#### Client

CONTRIBUTING.md

@@ -4,6 +4,13 @@ Want to hack on Docker? Awesome! Here are instructions to get you
started. They are probably not perfect, please let us know if anything
feels wrong or incomplete.
## Reporting Issues
When reporting [issues](https://github.com/dotcloud/docker/issues)
on Github please include your host OS ( Ubuntu 12.04, Fedora 19, etc... )
and the output of `docker version` along with the output of `docker info` if possible.
This information will help us review and fix your issue faster.
## Build Environment
For instructions on setting up your development environment, please
@@ -64,7 +71,7 @@ your branch before submitting a pull request.
Update the documentation when creating or modifying features. Test
your documentation changes for clarity, concision, and correctness, as
well as a clean docmuent build. See ``docs/README.md`` for more
well as a clean documentation build. See ``docs/README.md`` for more
information on building the docs and how docs get released.
Write clean code. Universally formatted code promotes ease of writing, reading,
@@ -115,6 +122,7 @@ For more details see [MAINTAINERS.md](hack/MAINTAINERS.md)
* Step 1: learn the component inside out
* Step 2: make yourself useful by contributing code, bugfixes, support etc.
* Step 3: volunteer on the irc channel (#docker@freenode)
* Step 4: propose yourself at a scheduled #docker-meeting
Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available.
You don't have to be a maintainer to make a difference on the project!

Dockerfile

@@ -24,53 +24,55 @@
#
docker-version 0.6.1
from ubuntu:12.04
maintainer Solomon Hykes <solomon@dotcloud.com>
FROM ubuntu:12.04
MAINTAINER Solomon Hykes <solomon@dotcloud.com>
# Build dependencies
run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
run apt-get update
run apt-get install -y -q curl
run apt-get install -y -q git
run apt-get install -y -q mercurial
run apt-get install -y -q build-essential libsqlite3-dev
RUN echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
RUN apt-get update
RUN apt-get install -y -q curl
RUN apt-get install -y -q git
RUN apt-get install -y -q mercurial
RUN apt-get install -y -q build-essential libsqlite3-dev
# Install Go
run curl -s https://go.googlecode.com/files/go1.2rc5.src.tar.gz | tar -v -C /usr/local -xz
env PATH /usr/local/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
env GOPATH /go:/go/src/github.com/dotcloud/docker/vendor
run cd /usr/local/go/src && ./make.bash && go install -ldflags '-w -linkmode external -extldflags "-static -Wl,--unresolved-symbols=ignore-in-shared-libs"' -tags netgo -a std
RUN curl -s https://go.googlecode.com/files/go1.2.src.tar.gz | tar -v -C /usr/local -xz
ENV PATH /usr/local/go/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
ENV GOPATH /go:/go/src/github.com/dotcloud/docker/vendor
RUN cd /usr/local/go/src && ./make.bash && go install -ldflags '-w -linkmode external -extldflags "-static -Wl,--unresolved-symbols=ignore-in-shared-libs"' -tags netgo -a std
# Ubuntu stuff
run apt-get install -y -q ruby1.9.3 rubygems libffi-dev
run gem install --no-rdoc --no-ri fpm
run apt-get install -y -q reprepro dpkg-sig
RUN apt-get install -y -q ruby1.9.3 rubygems libffi-dev
RUN gem install --no-rdoc --no-ri fpm
RUN apt-get install -y -q reprepro dpkg-sig
# Install s3cmd 1.0.1 (earlier versions don't support env variables in the config)
run apt-get install -y -q python-pip
run pip install s3cmd
run pip install python-magic
run /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY\n' > /.s3cfg
RUN apt-get install -y -q python-pip
RUN pip install s3cmd==1.1.0-beta3
RUN pip install python-magic==0.4.6
RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY\n' > /.s3cfg
# Runtime dependencies
run apt-get install -y -q iptables
run apt-get install -y -q lxc
run apt-get install -y -q aufs-tools
RUN apt-get install -y -q iptables
RUN apt-get install -y -q lxc
RUN apt-get install -y -q aufs-tools
# Get lvm2 source for compiling statically
run git clone git://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout v2_02_103
RUN git clone https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout v2_02_103
# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
# note: we can't use "git clone -b" above because it requires at least git 1.7.10 to be able to use that on a tag instead of a branch and we only have 1.7.9.5
# Compile and install lvm2
run cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper
RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper
# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
volume /var/lib/docker
workdir /go/src/github.com/dotcloud/docker
# Grab Go's cover tool for dead-simple code coverage testing
RUN go get code.google.com/p/go.tools/cmd/cover
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/dotcloud/docker
# Wrap all commands in the "docker-in-docker" script to allow nested containers
entrypoint ["hack/dind"]
ENTRYPOINT ["hack/dind"]
# Upload docker source
add . /go/src/github.com/dotcloud/docker
ADD . /go/src/github.com/dotcloud/docker

Makefile

@@ -0,0 +1,26 @@
.PHONY: all binary build default docs shell test

DOCKER_RUN_DOCKER := docker run -rm -i -t -privileged -e TESTFLAGS -v $(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles docker

default: binary

all: build
	$(DOCKER_RUN_DOCKER) hack/make.sh

binary: build
	$(DOCKER_RUN_DOCKER) hack/make.sh binary

docs:
	docker build -t docker-docs docs && docker run -p 8000:8000 docker-docs

test: build
	$(DOCKER_RUN_DOCKER) hack/make.sh test test-integration

shell: build
	$(DOCKER_RUN_DOCKER) bash

build: bundles
	docker build -t docker .

bundles:
	mkdir bundles

VERSION

@@ -1 +1 @@
0.6.6-dev
0.7.2

Vagrantfile

@@ -70,7 +70,7 @@ SCRIPT
# trigger dkms to build the virtualbox guest module install.
$vbox_script = <<VBOX_SCRIPT + $script
# Install the VirtualBox guest additions if they aren't already installed.
if [ ! -d /opt/VBoxGuestAdditions-4.3.2/ ]; then
if [ ! -d /opt/VBoxGuestAdditions-4.3.4/ ]; then
# Update remote package metadata. 'apt-get update' is idempotent.
apt-get update -q
@@ -79,9 +79,10 @@ if [ ! -d /opt/VBoxGuestAdditions-4.3.2/ ]; then
apt-get install -q -y linux-headers-generic-lts-raring dkms
echo 'Downloading VBox Guest Additions...'
wget -cq http://dlc.sun.com.edgesuite.net/virtualbox/4.3.2/VBoxGuestAdditions_4.3.2.iso
wget -cq http://dlc.sun.com.edgesuite.net/virtualbox/4.3.4/VBoxGuestAdditions_4.3.4.iso
echo "f120793fa35050a8280eacf9c930cf8d9b88795161520f6515c0cc5edda2fe8a VBoxGuestAdditions_4.3.4.iso" | sha256sum --check || exit 1
mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.3.2.iso /mnt
mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.3.4.iso /mnt
/mnt/VBoxLinuxAdditions.run --nox11
umount /mnt
fi

api.go

@@ -1,12 +1,16 @@
package docker
import (
"bufio"
"bytes"
"code.google.com/p/go.net/websocket"
"encoding/base64"
"encoding/json"
"expvar"
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/auth"
"github.com/dotcloud/docker/systemd"
"github.com/dotcloud/docker/utils"
"github.com/gorilla/mux"
"io"
@@ -15,6 +19,7 @@ import (
"mime"
"net"
"net/http"
"net/http/pprof"
"os"
"os/exec"
"regexp"
@@ -23,7 +28,7 @@ import (
)
const (
APIVERSION = 1.7
APIVERSION = 1.8
DEFAULTHTTPHOST = "127.0.0.1"
DEFAULTHTTPPORT = 4243
DEFAULTUNIXSOCKET = "/var/run/docker.sock"
@@ -135,7 +140,8 @@ func postAuth(srv *Server, version float64, w http.ResponseWriter, r *http.Reque
}
func getVersion(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return writeJSON(w, http.StatusOK, srv.DockerVersion())
srv.Eng.ServeHTTP(w, r)
return nil
}
func postContainersKill(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -145,19 +151,11 @@ func postContainersKill(srv *Server, version float64, w http.ResponseWriter, r *
if err := parseForm(r); err != nil {
return err
}
name := vars["name"]
signal := 0
if r != nil {
if s := r.Form.Get("signal"); s != "" {
s, err := strconv.Atoi(s)
if err != nil {
return err
}
signal = s
}
job := srv.Eng.Job("kill", vars["name"])
if sig := r.Form.Get("signal"); sig != "" {
job.Args = append(job.Args, sig)
}
if err := srv.ContainerKill(name, signal); err != nil {
if err := job.Run(); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
@@ -168,10 +166,11 @@ func getContainersExport(srv *Server, version float64, w http.ResponseWriter, r
if vars == nil {
return fmt.Errorf("Missing parameter")
}
name := vars["name"]
if err := srv.ContainerExport(name, w); err != nil {
utils.Errorf("%s", err)
job := srv.Eng.Job("export", vars["name"])
if err := job.Stdout.Add(w); err != nil {
return err
}
if err := job.Run(); err != nil {
return err
}
return nil
@@ -217,7 +216,8 @@ func getImagesViz(srv *Server, version float64, w http.ResponseWriter, r *http.R
}
func getInfo(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return writeJSON(w, http.StatusOK, srv.DockerInfo())
srv.Eng.ServeHTTP(w, r)
return nil
}
func getEvents(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -254,7 +254,7 @@ func getEvents(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
wf.Flush()
if since != 0 {
// If since, send previous events that happened after the timestamp
for _, event := range srv.events {
for _, event := range srv.GetEvents() {
if event.Time >= since {
err := sendEvent(wf, &event)
if err != nil && err.Error() == "JSON error" {
@@ -357,18 +357,13 @@ func postImagesTag(srv *Server, version float64, w http.ResponseWriter, r *http.
if err := parseForm(r); err != nil {
return err
}
repo := r.Form.Get("repo")
tag := r.Form.Get("tag")
if vars == nil {
return fmt.Errorf("Missing parameter")
}
name := vars["name"]
force, err := getBoolParam(r.Form.Get("force"))
if err != nil {
return err
}
if err := srv.ContainerTag(name, repo, tag, force); err != nil {
job := srv.Eng.Job("tag", vars["name"], r.Form.Get("repo"), r.Form.Get("tag"))
job.Setenv("force", r.Form.Get("force"))
if err := job.Run(); err != nil {
return err
}
w.WriteHeader(http.StatusCreated)
@@ -383,13 +378,17 @@ func postCommit(srv *Server, version float64, w http.ResponseWriter, r *http.Req
if err := json.NewDecoder(r.Body).Decode(config); err != nil && err != io.EOF {
utils.Errorf("%s", err)
}
repo := r.Form.Get("repo")
tag := r.Form.Get("tag")
container := r.Form.Get("container")
author := r.Form.Get("author")
comment := r.Form.Get("comment")
id, err := srv.ContainerCommit(container, repo, tag, author, comment, config)
if err != nil {
job := srv.Eng.Job("commit", r.Form.Get("container"))
job.Setenv("repo", r.Form.Get("repo"))
job.Setenv("tag", r.Form.Get("tag"))
job.Setenv("author", r.Form.Get("author"))
job.Setenv("comment", r.Form.Get("comment"))
job.SetenvJson("config", config)
var id string
job.Stdout.AddString(&id)
if err := job.Run(); err != nil {
return err
}
@@ -534,6 +533,18 @@ func postImagesPush(srv *Server, version float64, w http.ResponseWriter, r *http
return nil
}
func getImagesGet(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
name := vars["name"]
if version > 1.0 {
w.Header().Set("Content-Type", "application/x-tar")
}
return srv.ImageExport(name, w)
}
func postImagesLoad(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return srv.ImageLoad(r.Body)
}
func postContainersCreate(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return nil
@@ -552,12 +563,18 @@ func postContainersCreate(srv *Server, version float64, w http.ResponseWriter, r
job.SetenvList("Dns", defaultDns)
}
// Read container ID from the first line of stdout
job.StdoutParseString(&out.ID)
job.Stdout.AddString(&out.ID)
// Read warnings from stderr
job.StderrParseLines(&out.Warnings, 0)
warnings := &bytes.Buffer{}
job.Stderr.Add(warnings)
if err := job.Run(); err != nil {
return err
}
// Parse warnings from stderr
scanner := bufio.NewScanner(warnings)
for scanner.Scan() {
out.Warnings = append(out.Warnings, scanner.Text())
}
if job.GetenvInt("Memory") > 0 && !srv.runtime.capabilities.MemoryLimit {
log.Println("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.")
out.Warnings = append(out.Warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.")
@@ -666,17 +683,12 @@ func postContainersStop(srv *Server, version float64, w http.ResponseWriter, r *
if err := parseForm(r); err != nil {
return err
}
t, err := strconv.Atoi(r.Form.Get("t"))
if err != nil || t < 0 {
t = 10
}
if vars == nil {
return fmt.Errorf("Missing parameter")
}
name := vars["name"]
if err := srv.ContainerStop(name, t); err != nil {
job := srv.Eng.Job("stop", vars["name"])
job.Setenv("t", r.Form.Get("t"))
if err := job.Run(); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
@@ -687,33 +699,28 @@ func postContainersWait(srv *Server, version float64, w http.ResponseWriter, r *
if vars == nil {
return fmt.Errorf("Missing parameter")
}
name := vars["name"]
status, err := srv.ContainerWait(name)
job := srv.Eng.Job("wait", vars["name"])
var statusStr string
job.Stdout.AddString(&statusStr)
if err := job.Run(); err != nil {
return err
}
// Parse a 16-bit encoded integer to map typical unix exit status.
status, err := strconv.ParseInt(statusStr, 10, 16)
if err != nil {
return err
}
return writeJSON(w, http.StatusOK, &APIWait{StatusCode: status})
return writeJSON(w, http.StatusOK, &APIWait{StatusCode: int(status)})
}
func postContainersResize(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
}
height, err := strconv.Atoi(r.Form.Get("h"))
if err != nil {
return err
}
width, err := strconv.Atoi(r.Form.Get("w"))
if err != nil {
return err
}
if vars == nil {
return fmt.Errorf("Missing parameter")
}
name := vars["name"]
if err := srv.ContainerResize(name, height, width); err != nil {
if err := srv.Eng.Job("resize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil {
return err
}
return nil
@@ -853,7 +860,10 @@ func getContainersByName(srv *Server, version float64, w http.ResponseWriter, r
return fmt.Errorf("Conflict between containers and images")
}
return writeJSON(w, http.StatusOK, container)
container.readHostConfig()
c := APIContainer{container, container.hostConfig}
return writeJSON(w, http.StatusOK, c)
}
func getImagesByName(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -879,12 +889,25 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
if version < 1.3 {
return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.")
}
remoteURL := r.FormValue("remote")
repoName := r.FormValue("t")
rawSuppressOutput := r.FormValue("q")
rawNoCache := r.FormValue("nocache")
rawRm := r.FormValue("rm")
repoName, tag := utils.ParseRepositoryTag(repoName)
var (
remoteURL = r.FormValue("remote")
repoName = r.FormValue("t")
rawSuppressOutput = r.FormValue("q")
rawNoCache = r.FormValue("nocache")
rawRm = r.FormValue("rm")
authEncoded = r.Header.Get("X-Registry-Auth")
authConfig = &auth.AuthConfig{}
tag string
)
repoName, tag = utils.ParseRepositoryTag(repoName)
if authEncoded != "" {
authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
// for a pull it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
authConfig = &auth.AuthConfig{}
}
}
var context io.Reader
@@ -910,7 +933,7 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
}
context = c
} else if utils.IsURL(remoteURL) {
f, err := utils.Download(remoteURL, ioutil.Discard)
f, err := utils.Download(remoteURL)
if err != nil {
return err
}
@@ -939,9 +962,26 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
return err
}
b := NewBuildFile(srv, utils.NewWriteFlusher(w), !suppressOutput, !noCache, rm)
if version >= 1.8 {
w.Header().Set("Content-Type", "application/json")
}
sf := utils.NewStreamFormatter(version >= 1.8)
b := NewBuildFile(srv,
&StdoutFormater{
Writer: utils.NewWriteFlusher(w),
StreamFormatter: sf,
},
&StderrFormater{
Writer: utils.NewWriteFlusher(w),
StreamFormatter: sf,
},
!suppressOutput, !noCache, rm, utils.NewWriteFlusher(w), sf, authConfig)
id, err := b.Build(context)
if err != nil {
if sf.Used() {
w.Write(sf.FormatError(err))
return nil
}
return fmt.Errorf("Error build: %s", err)
}
if repoName != "" {
@@ -967,7 +1007,7 @@ func postContainersCopy(srv *Server, version float64, w http.ResponseWriter, r *
}
if copyData.Resource == "" {
return fmt.Errorf("Resource cannot be empty")
return fmt.Errorf("Path cannot be empty")
}
if copyData.Resource[0] == '/' {
copyData.Resource = copyData.Resource[1:]
@@ -1025,9 +1065,37 @@ func makeHttpHandler(srv *Server, logging bool, localMethod string, localRoute s
}
}
// Replicated from expvar.go as not public.
func expvarHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
fmt.Fprintf(w, "{\n")
first := true
expvar.Do(func(kv expvar.KeyValue) {
if !first {
fmt.Fprintf(w, ",\n")
}
first = false
fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
})
fmt.Fprintf(w, "\n}\n")
}
func AttachProfiler(router *mux.Router) {
router.HandleFunc("/debug/vars", expvarHandler)
router.HandleFunc("/debug/pprof/", pprof.Index)
router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
router.HandleFunc("/debug/pprof/profile", pprof.Profile)
router.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
router.HandleFunc("/debug/pprof/heap", pprof.Handler("heap").ServeHTTP)
router.HandleFunc("/debug/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP)
router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP)
}
func createRouter(srv *Server, logging bool) (*mux.Router, error) {
r := mux.NewRouter()
if os.Getenv("DEBUG") != "" {
AttachProfiler(r)
}
m := map[string]map[string]HttpApiFunc{
"GET": {
"/events": getEvents,
@@ -1036,6 +1104,7 @@ func createRouter(srv *Server, logging bool) (*mux.Router, error) {
"/images/json": getImagesJSON,
"/images/viz": getImagesViz,
"/images/search": getImagesSearch,
"/images/{name:.*}/get": getImagesGet,
"/images/{name:.*}/history": getImagesHistory,
"/images/{name:.*}/json": getImagesByName,
"/containers/ps": getContainersJSON,
@@ -1052,6 +1121,7 @@ func createRouter(srv *Server, logging bool) (*mux.Router, error) {
"/build": postBuild,
"/images/create": postImagesCreate,
"/images/{name:.*}/insert": postImagesInsert,
"/images/load": postImagesLoad,
"/images/{name:.*}/push": postImagesPush,
"/images/{name:.*}/tag": postImagesTag,
"/containers/create": postContainersCreate,
@@ -1112,8 +1182,6 @@ func ServeRequest(srv *Server, apiversion float64, w http.ResponseWriter, req *h
}
func ListenAndServe(proto, addr string, srv *Server, logging bool) error {
log.Printf("Listening for HTTP on %s (%s)\n", addr, proto)
r, err := createRouter(srv, logging)
if err != nil {
return err
@@ -1144,5 +1212,9 @@ func ListenAndServe(proto, addr string, srv *Server, logging bool) error {
}
}
httpSrv := http.Server{Addr: addr, Handler: r}
log.Printf("Listening for HTTP on %s (%s)\n", addr, proto)
// Tell the init daemon we are accepting requests
go systemd.SdNotify("READY=1")
return httpSrv.Serve(l)
}
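SdNotify lets systemd know the daemon is ready before the blocking Serve call takes over. A rough sketch of the underlying sd_notify protocol (an assumption about what the systemd package does, not its actual source):

// sdNotify writes a state string to the datagram socket named by $NOTIFY_SOCKET.
func sdNotify(state string) error {
	addr := os.Getenv("NOTIFY_SOCKET")
	if addr == "" {
		return nil // not running under systemd; silently do nothing
	}
	conn, err := net.DialUnix("unixgram", nil, &net.UnixAddr{Name: addr, Net: "unixgram"})
	if err != nil {
		return err
	}
	defer conn.Close()
	_, err = conn.Write([]byte(state))
	return err
}

Note that the contrib unit file (further down) declares Type=simple, so READY=1 is advisory unless the unit is switched to Type=notify.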

View File

@@ -29,23 +29,6 @@ type (
VirtualSize int64
}
APIInfo struct {
Debug bool
Containers int
Images int
Driver string `json:",omitempty"`
DriverStatus [][2]string `json:",omitempty"`
NFd int `json:",omitempty"`
NGoroutines int `json:",omitempty"`
MemoryLimit bool `json:",omitempty"`
SwapLimit bool `json:",omitempty"`
IPv4Forwarding bool `json:",omitempty"`
LXCVersion string `json:",omitempty"`
NEventsListener int `json:",omitempty"`
KernelVersion string `json:",omitempty"`
IndexServerAddress string `json:",omitempty"`
}
APITop struct {
Titles []string
Processes [][]string
@@ -95,12 +78,6 @@ type (
IP string
}
APIVersion struct {
Version string
GitCommit string `json:",omitempty"`
GoVersion string `json:",omitempty"`
}
APIWait struct {
StatusCode int
}
@@ -118,6 +95,10 @@ type (
Resource string
HostPath string
}
APIContainer struct {
*Container
HostConfig *HostConfig
}
)
func (api APIImages) ToLegacy() []APIImagesOld {

View File

@@ -181,7 +181,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
oldStat.Rdev != newStat.Rdev ||
// Don't look at size for dirs, it's not a good measure of change
(oldStat.Size != newStat.Size && oldStat.Mode&syscall.S_IFDIR != syscall.S_IFDIR) ||
oldStat.Mtim != newStat.Mtim {
getLastModification(oldStat) != getLastModification(newStat) {
change := Change{
Path: newChild.path(),
Kind: ChangeModify,

View File

@@ -71,17 +71,27 @@ func createSampleDir(t *testing.T, root string) {
{Symlink, "symlink1", "target1", 0666},
{Symlink, "symlink2", "target2", 0666},
}
now := time.Now()
for _, info := range files {
p := path.Join(root, info.path)
if info.filetype == Dir {
if err := os.MkdirAll(path.Join(root, info.path), info.permissions); err != nil {
if err := os.MkdirAll(p, info.permissions); err != nil {
t.Fatal(err)
}
} else if info.filetype == Regular {
if err := ioutil.WriteFile(path.Join(root, info.path), []byte(info.contents), info.permissions); err != nil {
if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil {
t.Fatal(err)
}
} else if info.filetype == Symlink {
if err := os.Symlink(info.contents, path.Join(root, info.path)); err != nil {
if err := os.Symlink(info.contents, p); err != nil {
t.Fatal(err)
}
}
if info.filetype != Symlink {
// Set a consistent atime and mtime for all files and dirs
if err := os.Chtimes(p, now, now); err != nil {
t.Fatal(err)
}
}
@@ -200,6 +210,9 @@ func TestChangesDirsMutated(t *testing.T) {
if err := copyDir(src, dst); err != nil {
t.Fatal(err)
}
defer os.RemoveAll(src)
defer os.RemoveAll(dst)
mutateSampleDir(t, dst)
changes, err := ChangesDirs(dst, src)
@@ -225,8 +238,7 @@ func TestChangesDirsMutated(t *testing.T) {
{"/symlinknew", ChangeAdd},
}
i := 0
for ; i < max(len(changes), len(expectedChanges)); i++ {
for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
if i >= len(expectedChanges) {
t.Fatalf("unexpected change %s\n", changes[i].String())
}
@@ -235,64 +247,59 @@ func TestChangesDirsMutated(t *testing.T) {
}
if changes[i].Path == expectedChanges[i].Path {
if changes[i] != expectedChanges[i] {
t.Fatalf("Wrong change for %s, expected %s, got %d\n", changes[i].Path, changes[i].String(), expectedChanges[i].String())
t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, expectedChanges[i].String(), changes[i].String())
}
} else if changes[i].Path < expectedChanges[i].Path {
t.Fatalf("unexpected change %s\n", changes[i].String())
} else {
t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
}
}
for ; i < len(expectedChanges); i++ {
}
os.RemoveAll(src)
os.RemoveAll(dst)
}
func TestApplyLayer(t *testing.T) {
t.Skip("Skipping TestApplyLayer due to known failures") // Disable this for now as it is broken
return
src, err := ioutil.TempDir("", "docker-changes-test")
if err != nil {
t.Fatal(err)
}
createSampleDir(t, src)
dst := src + "-copy"
if err := copyDir(src, dst); err != nil {
t.Fatal(err)
}
mutateSampleDir(t, dst)
// src, err := ioutil.TempDir("", "docker-changes-test")
// if err != nil {
// t.Fatal(err)
// }
// createSampleDir(t, src)
// dst := src + "-copy"
// if err := copyDir(src, dst); err != nil {
// t.Fatal(err)
// }
// mutateSampleDir(t, dst)
changes, err := ChangesDirs(dst, src)
if err != nil {
t.Fatal(err)
}
// changes, err := ChangesDirs(dst, src)
// if err != nil {
// t.Fatal(err)
// }
layer, err := ExportChanges(dst, changes)
if err != nil {
t.Fatal(err)
}
// layer, err := ExportChanges(dst, changes)
// if err != nil {
// t.Fatal(err)
// }
layerCopy, err := NewTempArchive(layer, "")
if err != nil {
t.Fatal(err)
}
// layerCopy, err := NewTempArchive(layer, "")
// if err != nil {
// t.Fatal(err)
// }
if err := ApplyLayer(src, layerCopy); err != nil {
t.Fatal(err)
}
// if err := ApplyLayer(src, layerCopy); err != nil {
// t.Fatal(err)
// }
changes2, err := ChangesDirs(src, dst)
if err != nil {
t.Fatal(err)
}
// changes2, err := ChangesDirs(src, dst)
// if err != nil {
// t.Fatal(err)
// }
if len(changes2) != 0 {
t.Fatalf("Unexpected differences after re applying mutation: %v", changes)
}
// if len(changes2) != 0 {
// t.Fatalf("Unexpected differences after re applying mutation: %v", changes)
// }
os.RemoveAll(src)
os.RemoveAll(dst)
// os.RemoveAll(src)
// os.RemoveAll(dst)
}

View File

@@ -83,8 +83,10 @@ func ApplyLayer(dest string, layer Archive) error {
}
for k, v := range modifiedDirs {
aTime := time.Unix(v.Atim.Unix())
mTime := time.Unix(v.Mtim.Unix())
lastAccess := getLastAccess(v)
lastModification := getLastModification(v)
aTime := time.Unix(lastAccess.Unix())
mTime := time.Unix(lastModification.Unix())
if err := os.Chtimes(k, aTime, mTime); err != nil {
return err

11
archive/stat_darwin.go Normal file
View File

@@ -0,0 +1,11 @@
package archive
import "syscall"
func getLastAccess(stat *syscall.Stat_t) syscall.Timespec {
return stat.Atimespec
}
func getLastModification(stat *syscall.Stat_t) syscall.Timespec {
return stat.Mtimespec
}

11
archive/stat_linux.go Normal file
View File

@@ -0,0 +1,11 @@
package archive
import "syscall"
func getLastAccess(stat *syscall.Stat_t) syscall.Timespec {
return stat.Atim
}
func getLastModification(stat *syscall.Stat_t) syscall.Timespec {
return stat.Mtim
}
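These two build-constrained files exist because syscall.Stat_t names its timestamp fields differently per platform (Atimespec/Mtimespec on Darwin, Atim/Mtim on Linux). A usage sketch, assuming any existing path such as /etc/hosts:

var st syscall.Stat_t
if err := syscall.Stat("/etc/hosts", &st); err != nil {
	panic(err)
}
// Timespec.Unix() returns (sec, nsec), which time.Unix accepts directly,
// matching the aTime/mTime construction in ApplyLayer above.
mtime := time.Unix(getLastModification(&st).Unix())
fmt.Println("last modified:", mtime)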

View File

@@ -63,7 +63,7 @@ func decodeAuth(authStr string) (string, string, error) {
if n > decLen {
return "", "", fmt.Errorf("Something went wrong decoding auth config")
}
arr := strings.Split(string(decoded), ":")
arr := strings.SplitN(string(decoded), ":", 2)
if len(arr) != 2 {
return "", "", fmt.Errorf("Invalid auth configuration file")
}
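The switch to SplitN matters because the decoded credential is "username:password" and the password itself may legally contain colons; a quick sketch of the difference:

decoded := "alice:s3cr3t:with:colons"
strings.Split(decoded, ":")     // ["alice" "s3cr3t" "with" "colons"], len 4: rejected
strings.SplitN(decoded, ":", 2) // ["alice" "s3cr3t:with:colons"], len 2: accepted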
@@ -192,13 +192,6 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e
} else {
status = "Account created. Please see the documentation of the registry " + serverAddress + " for instructions how to activate it."
}
} else if reqStatusCode == 403 {
if loginAgainstOfficialIndex {
return "", fmt.Errorf("Login: Your account hasn't been activated. " +
"Please check your e-mail for a confirmation link.")
}
return "", fmt.Errorf("Login: Your account hasn't been activated. " +
"Please see the documentation of the registry " + serverAddress + " for instructions how to activate it.")
} else if reqStatusCode == 400 {
if string(reqBody) == "\"Username or email already exists\"" {
req, err := factory.NewRequest("GET", serverAddress+"users/", nil)
@@ -216,13 +209,39 @@ func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, e
status = "Login Succeeded"
} else if resp.StatusCode == 401 {
return "", fmt.Errorf("Wrong login/password, please try again")
} else if resp.StatusCode == 403 {
if loginAgainstOfficialIndex {
return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.")
}
return "", fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress)
} else {
return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body,
resp.StatusCode, resp.Header)
return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header)
}
} else {
return "", fmt.Errorf("Registration: %s", reqBody)
}
} else if reqStatusCode == 401 {
// This case would happen with private registries where /v1/users is
// protected, so people can use `docker login` as an auth check.
req, err := factory.NewRequest("GET", serverAddress+"users/", nil)
req.SetBasicAuth(authConfig.Username, authConfig.Password)
resp, err := client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
if resp.StatusCode == 200 {
status = "Login Succeeded"
} else if resp.StatusCode == 401 {
return "", fmt.Errorf("Wrong login/password, please try again")
} else {
return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body,
resp.StatusCode, resp.Header)
}
} else {
return "", fmt.Errorf("Unexpected status code [%d] : %s", reqStatusCode, reqBody)
}

View File

@@ -4,6 +4,7 @@ import (
"encoding/json"
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/auth"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
@@ -33,17 +34,24 @@ type buildFile struct {
utilizeCache bool
rm bool
authConfig *auth.AuthConfig
tmpContainers map[string]struct{}
tmpImages map[string]struct{}
out io.Writer
outStream io.Writer
errStream io.Writer
// Deprecated, original writer used for ImagePull. To be removed.
outOld io.Writer
sf *utils.StreamFormatter
}
func (b *buildFile) clearTmp(containers map[string]struct{}) {
for c := range containers {
tmp := b.runtime.Get(c)
b.runtime.Destroy(tmp)
fmt.Fprintf(b.out, "Removing intermediate container %s\n", utils.TruncateID(c))
fmt.Fprintf(b.outStream, "Removing intermediate container %s\n", utils.TruncateID(c))
}
}
@@ -52,7 +60,7 @@ func (b *buildFile) CmdFrom(name string) error {
if err != nil {
if b.runtime.graph.IsNotExist(err) {
remote, tag := utils.ParseRepositoryTag(name)
if err := b.srv.ImagePull(remote, tag, b.out, utils.NewStreamFormatter(false), nil, nil, true); err != nil {
if err := b.srv.ImagePull(remote, tag, b.outOld, b.sf, b.authConfig, nil, true); err != nil {
return err
}
image, err = b.runtime.repositories.LookupImage(name)
@@ -100,7 +108,7 @@ func (b *buildFile) CmdRun(args string) error {
if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
return err
} else if cache != nil {
fmt.Fprintf(b.out, " ---> Using cache\n")
fmt.Fprintf(b.outStream, " ---> Using cache\n")
utils.Debugf("[BUILDER] Use cached version")
b.image = cache.ID
return nil
@@ -241,7 +249,7 @@ func (b *buildFile) CmdVolume(args string) error {
volume = []string{args}
}
if b.config.Volumes == nil {
b.config.Volumes = NewPathOpts()
b.config.Volumes = map[string]struct{}{}
}
for _, v := range volume {
b.config.Volumes[v] = struct{}{}
@@ -253,7 +261,7 @@ func (b *buildFile) CmdVolume(args string) error {
}
func (b *buildFile) addRemote(container *Container, orig, dest string) error {
file, err := utils.Download(orig, ioutil.Discard)
file, err := utils.Download(orig)
if err != nil {
return err
}
@@ -288,7 +296,7 @@ func (b *buildFile) addContext(container *Container, orig, dest string) error {
destPath = destPath + "/"
}
if !strings.HasPrefix(origPath, b.context) {
return fmt.Errorf("Forbidden path: %s", origPath)
return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
}
fi, err := os.Stat(origPath)
if err != nil {
@@ -364,6 +372,34 @@ func (b *buildFile) CmdAdd(args string) error {
return nil
}
type StdoutFormater struct {
io.Writer
*utils.StreamFormatter
}
func (sf *StdoutFormater) Write(buf []byte) (int, error) {
formattedBuf := sf.StreamFormatter.FormatStream(string(buf))
n, err := sf.Writer.Write(formattedBuf)
if n != len(formattedBuf) {
return n, io.ErrShortWrite
}
return len(buf), err
}
type StderrFormater struct {
io.Writer
*utils.StreamFormatter
}
func (sf *StderrFormater) Write(buf []byte) (int, error) {
formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m")
n, err := sf.Writer.Write(formattedBuf)
if n != len(formattedBuf) {
return n, io.ErrShortWrite
}
return len(buf), err
}
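Both wrappers run raw build output through the StreamFormatter before it reaches the HTTP response, with stderr additionally wrapped in ANSI red (\033[91m ... \033[0m). A usage sketch, assuming FormatStream emits one JSON record per write when the formatter is in JSON mode, as the postBuild handler above implies:

sf := utils.NewStreamFormatter(true) // true: JSON framing, used for API >= 1.8
out := &StdoutFormater{Writer: os.Stdout, StreamFormatter: sf}
fmt.Fprintf(out, "Step 1 : FROM ubuntu\n")
// writes something like: {"stream":"Step 1 : FROM ubuntu\n"}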
func (b *buildFile) run() (string, error) {
if b.image == "" {
return "", fmt.Errorf("Please provide a source image with `from` prior to run")
@@ -376,7 +412,7 @@ func (b *buildFile) run() (string, error) {
return "", err
}
b.tmpContainers[c.ID] = struct{}{}
fmt.Fprintf(b.out, " ---> Running in %s\n", utils.TruncateID(c.ID))
fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
// override the entry point that may have been picked up from the base image
c.Path = b.config.Cmd[0]
@@ -386,7 +422,7 @@ func (b *buildFile) run() (string, error) {
if b.verbose {
errCh = utils.Go(func() error {
return <-c.Attach(nil, nil, b.out, b.out)
return <-c.Attach(nil, nil, b.outStream, b.errStream)
})
}
@@ -403,7 +439,11 @@ func (b *buildFile) run() (string, error) {
// Wait for it to finish
if ret := c.Wait(); ret != 0 {
return "", fmt.Errorf("The command %v returned a non-zero code: %d", b.config.Cmd, ret)
err := &utils.JSONError{
Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret),
Code: ret,
}
return "", err
}
return c.ID, nil
@@ -424,7 +464,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
return err
} else if cache != nil {
fmt.Fprintf(b.out, " ---> Using cache\n")
fmt.Fprintf(b.outStream, " ---> Using cache\n")
utils.Debugf("[BUILDER] Use cached version")
b.image = cache.ID
return nil
@@ -438,10 +478,10 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
return err
}
for _, warning := range warnings {
fmt.Fprintf(b.out, " ---> [Warning] %s\n", warning)
fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning)
}
b.tmpContainers[container.ID] = struct{}{}
fmt.Fprintf(b.out, " ---> Running in %s\n", utils.TruncateID(container.ID))
fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
id = container.ID
if err := container.EnsureMounted(); err != nil {
return err
@@ -507,22 +547,22 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:]))
if !exists {
fmt.Fprintf(b.out, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction))
fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction))
continue
}
stepN += 1
fmt.Fprintf(b.out, "Step %d : %s %s\n", stepN, strings.ToUpper(instruction), arguments)
fmt.Fprintf(b.outStream, "Step %d : %s %s\n", stepN, strings.ToUpper(instruction), arguments)
ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
if ret != nil {
return "", ret.(error)
}
fmt.Fprintf(b.out, " ---> %v\n", utils.TruncateID(b.image))
fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image))
}
if b.image != "" {
fmt.Fprintf(b.out, "Successfully built %s\n", utils.TruncateID(b.image))
fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image))
if b.rm {
b.clearTmp(b.tmpContainers)
}
@@ -531,16 +571,20 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
return "", fmt.Errorf("An error occurred during the build\n")
}
func NewBuildFile(srv *Server, out io.Writer, verbose, utilizeCache, rm bool) BuildFile {
func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *auth.AuthConfig) BuildFile {
return &buildFile{
runtime: srv.runtime,
srv: srv,
config: &Config{},
out: out,
outStream: outStream,
errStream: errStream,
tmpContainers: make(map[string]struct{}),
tmpImages: make(map[string]struct{}),
verbose: verbose,
utilizeCache: utilizeCache,
rm: rm,
sf: sf,
authConfig: auth,
outOld: outOld,
}
}

File diff suppressed because it is too large

157
commands_unit_test.go Normal file
View File

@@ -0,0 +1,157 @@
package docker
import (
"strings"
"testing"
)
func parse(t *testing.T, args string) (*Config, *HostConfig, error) {
config, hostConfig, _, err := ParseRun(strings.Split(args+" ubuntu bash", " "), nil)
return config, hostConfig, err
}
func mustParse(t *testing.T, args string) (*Config, *HostConfig) {
config, hostConfig, err := parse(t, args)
if err != nil {
t.Fatal(err)
}
return config, hostConfig
}
func TestParseRunLinks(t *testing.T) {
if _, hostConfig := mustParse(t, "-link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" {
t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links)
}
if _, hostConfig := mustParse(t, "-link a:b -link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" {
t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links)
}
if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 {
t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links)
}
if _, _, err := parse(t, "-link a"); err == nil {
t.Fatalf("Error parsing links. `-link a` should be an error but is not")
}
if _, _, err := parse(t, "-link"); err == nil {
t.Fatalf("Error parsing links. `-link` should be an error but is not")
}
}
func TestParseRunAttach(t *testing.T) {
if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr {
t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
}
if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr {
t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
}
if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
}
if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
}
if _, _, err := parse(t, "-a"); err == nil {
t.Fatalf("Error parsing attach flags, `-a` should be an error but is not")
}
if _, _, err := parse(t, "-a invalid"); err == nil {
t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not")
}
if _, _, err := parse(t, "-a invalid -a stdout"); err == nil {
t.Fatalf("Error parsing attach flags, `-a invalid -a stdout` should be an error but is not")
}
if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil {
t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not")
}
if _, _, err := parse(t, "-a stdin -d"); err == nil {
t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not")
}
if _, _, err := parse(t, "-a stdout -d"); err == nil {
t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not")
}
if _, _, err := parse(t, "-a stderr -d"); err == nil {
t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not")
}
if _, _, err := parse(t, "-d -rm"); err == nil {
t.Fatalf("Error parsing attach flags, `-d -rm` should be an error but is not")
}
}
func TestParseRunVolumes(t *testing.T) {
if config, hostConfig := mustParse(t, "-v /tmp"); hostConfig.Binds != nil {
t.Fatalf("Error parsing volume flags, `-v /tmp` should not mount-bind anything. Received %v", hostConfig.Binds)
} else if _, exists := config.Volumes["/tmp"]; !exists {
t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
}
if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil {
t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds)
} else if _, exists := config.Volumes["/tmp"]; !exists {
t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
} else if _, exists := config.Volumes["/var"]; !exists {
t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. Received %v", config.Volumes)
}
if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containerTmp. Received %v", hostConfig.Binds)
} else if _, exists := config.Volumes["/containerTmp"]; !exists {
t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
}
if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" || hostConfig.Binds[1] != "/hostVar:/containerVar" {
t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containerTmp and /hostVar into /containerVar. Received %v", hostConfig.Binds)
} else if _, exists := config.Volumes["/containerTmp"]; !exists {
t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
} else if _, exists := config.Volumes["/containerVar"]; !exists {
t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
}
if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp:ro" || hostConfig.Binds[1] != "/hostVar:/containerVar:rw" {
t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containerTmp and /hostVar into /containerVar. Received %v", hostConfig.Binds)
} else if _, exists := config.Volumes["/containerTmp"]; !exists {
t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
} else if _, exists := config.Volumes["/containerVar"]; !exists {
t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
}
if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containerTmp. Received %v", hostConfig.Binds)
} else if _, exists := config.Volumes["/containerTmp"]; !exists {
t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
} else if _, exists := config.Volumes["/containerVar"]; !exists {
t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
}
if config, hostConfig := mustParse(t, ""); hostConfig.Binds != nil {
t.Fatalf("Error parsing volume flags, without volume, nothing should be mount-binded. Received %v", hostConfig.Binds)
} else if len(config.Volumes) != 0 {
t.Fatalf("Error parsing volume flags, without volume, no volume should be present. Received %v", config.Volumes)
}
mustParse(t, "-v /")
if _, _, err := parse(t, "-v /:/"); err == nil {
t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't")
}
if _, _, err := parse(t, "-v"); err == nil {
t.Fatalf("Error parsing volume flags, `-v` should fail but didn't")
}
if _, _, err := parse(t, "-v /tmp:"); err == nil {
t.Fatalf("Error parsing volume flags, `-v /tmp:` should fail but didn't")
}
if _, _, err := parse(t, "-v /tmp:ro"); err == nil {
t.Fatalf("Error parsing volume flags, `-v /tmp:ro` should fail but didn't")
}
if _, _, err := parse(t, "-v /tmp::"); err == nil {
t.Fatalf("Error parsing volume flags, `-v /tmp::` should fail but didn't")
}
if _, _, err := parse(t, "-v :"); err == nil {
t.Fatalf("Error parsing volume flags, `-v :` should fail but didn't")
}
if _, _, err := parse(t, "-v ::"); err == nil {
t.Fatalf("Error parsing volume flags, `-v ::` should fail but didn't")
}
if _, _, err := parse(t, "-v /tmp:/tmp:/tmp:/tmp"); err == nil {
t.Fatalf("Error parsing volume flags, `-v /tmp:/tmp:/tmp:/tmp` should fail but didn't")
}
}

View File

@@ -14,6 +14,7 @@ type DaemonConfig struct {
Dns []string
EnableIptables bool
BridgeIface string
BridgeIp string
DefaultIp net.IP
InterContainerCommunication bool
GraphDriver string
@@ -27,8 +28,8 @@ func ConfigFromJob(job *engine.Job) *DaemonConfig {
config.Root = job.Getenv("Root")
config.AutoRestart = job.GetenvBool("AutoRestart")
config.EnableCors = job.GetenvBool("EnableCors")
if dns := job.Getenv("Dns"); dns != "" {
config.Dns = []string{dns}
if dns := job.GetenvList("Dns"); dns != nil {
config.Dns = dns
}
config.EnableIptables = job.GetenvBool("EnableIptables")
if br := job.Getenv("BridgeIface"); br != "" {
@@ -36,6 +37,7 @@ func ConfigFromJob(job *engine.Job) *DaemonConfig {
} else {
config.BridgeIface = DefaultNetworkBridge
}
config.BridgeIp = job.Getenv("BridgeIp")
config.DefaultIp = net.ParseIP(job.Getenv("DefaultIp"))
config.InterContainerCommunication = job.GetenvBool("InterContainerCommunication")
config.GraphDriver = job.Getenv("GraphDriver")
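Dns is now read back as a list, so multiple -dns flags survive the round trip through the job's environment. A sketch of that round trip, assuming the engine's SetenvList is the write-side counterpart of GetenvList:

job.SetenvList("Dns", []string{"8.8.8.8", "8.8.4.4"}) // assumed setter, mirroring GetenvList
config := ConfigFromJob(job)
// config.Dns == []string{"8.8.8.8", "8.8.4.4"}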

View File

@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/graphdriver"
@@ -18,14 +17,20 @@ import (
"os"
"os/exec"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"time"
)
var (
ErrNotATTY = errors.New("The PTY is not a file")
ErrNoTTY = errors.New("No PTY found")
)
type Container struct {
sync.Mutex
root string // Path to the "home" of the container, including metadata.
rootfs string // Path to the root filesystem of the container.
@@ -159,218 +164,6 @@ func NewPort(proto, port string) Port {
return Port(fmt.Sprintf("%s/%s", port, proto))
}
func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
cmd := Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container")
if os.Getenv("TEST") != "" {
cmd.SetOutput(ioutil.Discard)
cmd.Usage = nil
}
flHostname := cmd.String("h", "", "Container host name")
flWorkingDir := cmd.String("w", "", "Working directory inside the container")
flUser := cmd.String("u", "", "Username or UID")
flDetach := cmd.Bool("d", false, "Detached mode: Run container in the background, print new container id")
flAttach := NewAttachOpts()
cmd.Var(flAttach, "a", "Attach to stdin, stdout or stderr.")
flStdin := cmd.Bool("i", false, "Keep stdin open even if not attached")
flTty := cmd.Bool("t", false, "Allocate a pseudo-tty")
flMemoryString := cmd.String("m", "", "Memory limit (format: <number><optional unit>, where unit = b, k, m or g)")
flContainerIDFile := cmd.String("cidfile", "", "Write the container ID to the file")
flNetwork := cmd.Bool("n", true, "Enable networking for this container")
flPrivileged := cmd.Bool("privileged", false, "Give extended privileges to this container")
flAutoRemove := cmd.Bool("rm", false, "Automatically remove the container when it exits (incompatible with -d)")
cmd.Bool("sig-proxy", true, "Proxy all received signals to the process (even in non-tty mode)")
cmd.String("name", "", "Assign a name to the container")
flPublishAll := cmd.Bool("P", false, "Publish all exposed ports to the host interfaces")
if capabilities != nil && *flMemoryString != "" && !capabilities.MemoryLimit {
//fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
*flMemoryString = ""
}
flCpuShares := cmd.Int64("c", 0, "CPU shares (relative weight)")
var flPublish utils.ListOpts
cmd.Var(&flPublish, "p", "Publish a container's port to the host (use 'docker port' to see the actual mapping)")
var flExpose utils.ListOpts
cmd.Var(&flExpose, "expose", "Expose a port from the container without publishing it to your host")
var flEnv utils.ListOpts
cmd.Var(&flEnv, "e", "Set environment variables")
var flDns utils.ListOpts
cmd.Var(&flDns, "dns", "Set custom dns servers")
flVolumes := NewPathOpts()
cmd.Var(flVolumes, "v", "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")
var flVolumesFrom utils.ListOpts
cmd.Var(&flVolumesFrom, "volumes-from", "Mount volumes from the specified container(s)")
flEntrypoint := cmd.String("entrypoint", "", "Overwrite the default entrypoint of the image")
var flLxcOpts utils.ListOpts
cmd.Var(&flLxcOpts, "lxc-conf", "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")
var flLinks utils.ListOpts
cmd.Var(&flLinks, "link", "Add link to another container (name:alias)")
if err := cmd.Parse(args); err != nil {
return nil, nil, cmd, err
}
if *flDetach && len(flAttach) > 0 {
return nil, nil, cmd, ErrConflictAttachDetach
}
if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) {
return nil, nil, cmd, ErrInvalidWorikingDirectory
}
if *flDetach && *flAutoRemove {
return nil, nil, cmd, ErrConflictDetachAutoRemove
}
// If neither -d nor -a is set, attach to everything by default
if len(flAttach) == 0 && !*flDetach {
if !*flDetach {
flAttach.Set("stdout")
flAttach.Set("stderr")
if *flStdin {
flAttach.Set("stdin")
}
}
}
envs := []string{}
for _, env := range flEnv {
arr := strings.Split(env, "=")
if len(arr) > 1 {
envs = append(envs, env)
} else {
v := os.Getenv(env)
envs = append(envs, env+"="+v)
}
}
var flMemory int64
if *flMemoryString != "" {
parsedMemory, err := utils.RAMInBytes(*flMemoryString)
if err != nil {
return nil, nil, cmd, err
}
flMemory = parsedMemory
}
var binds []string
// add any bind targets to the list of container volumes
for bind := range flVolumes {
arr := strings.Split(bind, ":")
if len(arr) > 1 {
if arr[0] == "/" {
return nil, nil, cmd, fmt.Errorf("Invalid bind mount: source can't be '/'")
}
dstDir := arr[1]
flVolumes[dstDir] = struct{}{}
binds = append(binds, bind)
delete(flVolumes, bind)
}
}
parsedArgs := cmd.Args()
runCmd := []string{}
entrypoint := []string{}
image := ""
if len(parsedArgs) >= 1 {
image = cmd.Arg(0)
}
if len(parsedArgs) > 1 {
runCmd = parsedArgs[1:]
}
if *flEntrypoint != "" {
entrypoint = []string{*flEntrypoint}
}
var lxcConf []KeyValuePair
lxcConf, err := parseLxcConfOpts(flLxcOpts)
if err != nil {
return nil, nil, cmd, err
}
hostname := *flHostname
domainname := ""
parts := strings.SplitN(hostname, ".", 2)
if len(parts) > 1 {
hostname = parts[0]
domainname = parts[1]
}
ports, portBindings, err := parsePortSpecs(flPublish)
if err != nil {
return nil, nil, cmd, err
}
// Merge in exposed ports to the map of published ports
for _, e := range flExpose {
if strings.Contains(e, ":") {
return nil, nil, cmd, fmt.Errorf("Invalid port format for -expose: %s", e)
}
p := NewPort(splitProtoPort(e))
if _, exists := ports[p]; !exists {
ports[p] = struct{}{}
}
}
config := &Config{
Hostname: hostname,
Domainname: domainname,
PortSpecs: nil, // Deprecated
ExposedPorts: ports,
User: *flUser,
Tty: *flTty,
NetworkDisabled: !*flNetwork,
OpenStdin: *flStdin,
Memory: flMemory,
CpuShares: *flCpuShares,
AttachStdin: flAttach.Get("stdin"),
AttachStdout: flAttach.Get("stdout"),
AttachStderr: flAttach.Get("stderr"),
Env: envs,
Cmd: runCmd,
Dns: flDns,
Image: image,
Volumes: flVolumes,
VolumesFrom: strings.Join(flVolumesFrom, ","),
Entrypoint: entrypoint,
WorkingDir: *flWorkingDir,
}
hostConfig := &HostConfig{
Binds: binds,
ContainerIDFile: *flContainerIDFile,
LxcConf: lxcConf,
Privileged: *flPrivileged,
PortBindings: portBindings,
Links: flLinks,
PublishAllPorts: *flPublishAll,
}
if capabilities != nil && flMemory > 0 && !capabilities.SwapLimit {
//fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
config.MemorySwap = -1
}
// When allocating stdin in attached mode, close stdin at client disconnect
if config.OpenStdin && config.AttachStdin {
config.StdinOnce = true
}
return config, hostConfig, cmd, nil
}
type PortMapping map[string]string // Deprecated
type NetworkSettings struct {
@@ -710,9 +503,10 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
}
func (container *Container) Start() (err error) {
container.State.Lock()
defer container.State.Unlock()
if container.State.Running {
container.Lock()
defer container.Unlock()
if container.State.IsRunning() {
return fmt.Errorf("The container %s is already running.", container.ID)
}
defer func() {
@@ -747,162 +541,18 @@ func (container *Container) Start() (err error) {
log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work")
}
// Create the requested bind mounts
binds := make(map[string]BindMap)
// Define illegal container destinations
illegalDsts := []string{"/", "."}
for _, bind := range container.hostConfig.Binds {
// FIXME: factorize bind parsing in parseBind
var src, dst, mode string
arr := strings.Split(bind, ":")
if len(arr) == 2 {
src = arr[0]
dst = arr[1]
mode = "rw"
} else if len(arr) == 3 {
src = arr[0]
dst = arr[1]
mode = arr[2]
} else {
return fmt.Errorf("Invalid bind specification: %s", bind)
}
// Bail if trying to mount to an illegal destination
for _, illegal := range illegalDsts {
if dst == illegal {
return fmt.Errorf("Illegal bind destination: %s", dst)
}
}
bindMap := BindMap{
SrcPath: src,
DstPath: dst,
Mode: mode,
}
binds[path.Clean(dst)] = bindMap
}
if container.Volumes == nil || len(container.Volumes) == 0 {
container.Volumes = make(map[string]string)
container.VolumesRW = make(map[string]bool)
}
// Apply volumes from another container if requested
if container.Config.VolumesFrom != "" {
containerSpecs := strings.Split(container.Config.VolumesFrom, ",")
for _, containerSpec := range containerSpecs {
mountRW := true
specParts := strings.SplitN(containerSpec, ":", 2)
switch len(specParts) {
case 0:
return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom)
case 2:
switch specParts[1] {
case "ro":
mountRW = false
case "rw": // mountRW is already true
default:
return fmt.Errorf("Malformed volumes-from specification: %s", containerSpec)
}
}
c := container.runtime.Get(specParts[0])
if c == nil {
return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID)
}
for volPath, id := range c.Volumes {
if _, exists := container.Volumes[volPath]; exists {
continue
}
if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
return err
}
container.Volumes[volPath] = id
if isRW, exists := c.VolumesRW[volPath]; exists {
container.VolumesRW[volPath] = isRW && mountRW
}
}
}
if err := container.applyExternalVolumes(); err != nil {
return err
}
volumesDriver := container.runtime.volumes.driver
// Create the requested volumes if they don't exist
for volPath := range container.Config.Volumes {
volPath = path.Clean(volPath)
// Skip existing volumes
if _, exists := container.Volumes[volPath]; exists {
continue
}
var srcPath string
var isBindMount bool
srcRW := false
// If an external bind is defined for this volume, use that as a source
if bindMap, exists := binds[volPath]; exists {
isBindMount = true
srcPath = bindMap.SrcPath
if strings.ToLower(bindMap.Mode) == "rw" {
srcRW = true
}
// Otherwise create a directory in $ROOT/volumes/ and use that
} else {
// Do not pass a container as the parameter for the volume creation.
// The graph driver would use the container's information (Image) to
// create the parent.
c, err := container.runtime.volumes.Create(nil, nil, "", "", nil)
if err != nil {
return err
}
srcPath, err = volumesDriver.Get(c.ID)
if err != nil {
return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err)
}
srcRW = true // RW by default
}
container.Volumes[volPath] = srcPath
container.VolumesRW[volPath] = srcRW
// Create the mountpoint
rootVolPath := path.Join(container.RootfsPath(), volPath)
if err := os.MkdirAll(rootVolPath, 0755); err != nil {
return err
}
// Do not copy or change permissions if we are mounting from the host
if srcRW && !isBindMount {
volList, err := ioutil.ReadDir(rootVolPath)
if err != nil {
return err
}
if len(volList) > 0 {
srcList, err := ioutil.ReadDir(srcPath)
if err != nil {
return err
}
if len(srcList) == 0 {
// If the source volume is empty copy files from the root into the volume
if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil {
return err
}
var stat syscall.Stat_t
if err := syscall.Stat(rootVolPath, &stat); err != nil {
return err
}
var srcStat syscall.Stat_t
if err := syscall.Stat(srcPath, &srcStat); err != nil {
return err
}
// Change the source volume's ownership if it differs from the root
// files that were just copied
if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid {
if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil {
return err
}
}
}
}
}
if err := container.createVolumes(); err != nil {
return err
}
if err := container.generateLXCConfig(); err != nil {
@@ -924,7 +574,11 @@ func (container *Container) Start() (err error) {
// Networking
if !container.Config.NetworkDisabled {
params = append(params, "-g", container.network.Gateway.String())
network := container.NetworkSettings
params = append(params,
"-g", network.Gateway,
"-i", fmt.Sprintf("%s/%d", network.IPAddress, network.IPPrefixLen),
)
}
// User
@@ -936,7 +590,6 @@ func (container *Container) Start() (err error) {
env := []string{
"HOME=/",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"container=lxc",
"HOSTNAME=" + container.Config.Hostname,
}
@@ -944,6 +597,10 @@ func (container *Container) Start() (err error) {
env = append(env, "TERM=xterm")
}
if container.hostConfig.Privileged {
params = append(params, "-privileged")
}
// Init any links between the parent and children
runtime := container.runtime
@@ -1046,7 +703,7 @@ func (container *Container) Start() (err error) {
}
// FIXME: save state on disk *first*, then converge
// this way disk state is used as a journal, eg. we can restore after crash etc.
container.State.setRunning(container.cmd.Process.Pid)
container.State.SetRunning(container.cmd.Process.Pid)
// Init the lock
container.waitLock = make(chan struct{})
@@ -1054,14 +711,14 @@ func (container *Container) Start() (err error) {
container.ToDisk()
go container.monitor()
defer utils.Debugf("Container running: %v", container.State.Running)
defer utils.Debugf("Container running: %v", container.State.IsRunning())
// We wait for the container to be fully running.
// Timeout after 5 seconds. In case of broken pipe, just retry.
// Note: The container can run and finish correctly before
// the end of this loop
for now := time.Now(); time.Since(now) < 5*time.Second; {
// If the container dies while waiting for it, just return
if !container.State.Running {
if !container.State.IsRunning() {
return nil
}
output, err := exec.Command("lxc-info", "-s", "-n", container.ID).CombinedOutput()
@@ -1078,16 +735,208 @@ func (container *Container) Start() (err error) {
if strings.Contains(string(output), "RUNNING") {
return nil
}
utils.Debugf("Waiting for the container to start (running: %v): %s", container.State.Running, bytes.TrimSpace(output))
utils.Debugf("Waiting for the container to start (running: %v): %s", container.State.IsRunning(), bytes.TrimSpace(output))
time.Sleep(50 * time.Millisecond)
}
if container.State.Running {
if container.State.IsRunning() {
return ErrContainerStartTimeout
}
return ErrContainerStart
}
func (container *Container) getBindMap() (map[string]BindMap, error) {
// Create the requested bind mounts
binds := make(map[string]BindMap)
// Define illegal container destinations
illegalDsts := []string{"/", "."}
for _, bind := range container.hostConfig.Binds {
// FIXME: factorize bind parsing in parseBind
var src, dst, mode string
arr := strings.Split(bind, ":")
if len(arr) == 2 {
src = arr[0]
dst = arr[1]
mode = "rw"
} else if len(arr) == 3 {
src = arr[0]
dst = arr[1]
mode = arr[2]
} else {
return nil, fmt.Errorf("Invalid bind specification: %s", bind)
}
// Bail if trying to mount to an illegal destination
for _, illegal := range illegalDsts {
if dst == illegal {
return nil, fmt.Errorf("Illegal bind destination: %s", dst)
}
}
bindMap := BindMap{
SrcPath: src,
DstPath: dst,
Mode: mode,
}
binds[path.Clean(dst)] = bindMap
}
return binds, nil
}
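A worked example of what getBindMap yields for typical -v flags (values inferred from the parsing above):

// hostConfig.Binds = []string{"/hostTmp:/containerTmp", "/hostVar:/containerVar:ro"}
// produces:
//   binds["/containerTmp"] = BindMap{SrcPath: "/hostTmp", DstPath: "/containerTmp", Mode: "rw"}
//   binds["/containerVar"] = BindMap{SrcPath: "/hostVar", DstPath: "/containerVar", Mode: "ro"}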
func (container *Container) createVolumes() error {
binds, err := container.getBindMap()
if err != nil {
return err
}
volumesDriver := container.runtime.volumes.driver
// Create the requested volumes if they don't exist
for volPath := range container.Config.Volumes {
volPath = path.Clean(volPath)
volIsDir := true
// Skip existing volumes
if _, exists := container.Volumes[volPath]; exists {
continue
}
var srcPath string
var isBindMount bool
srcRW := false
// If an external bind is defined for this volume, use that as a source
if bindMap, exists := binds[volPath]; exists {
isBindMount = true
srcPath = bindMap.SrcPath
if strings.ToLower(bindMap.Mode) == "rw" {
srcRW = true
}
if stat, err := os.Lstat(bindMap.SrcPath); err != nil {
return err
} else {
volIsDir = stat.IsDir()
}
// Otherwise create a directory in $ROOT/volumes/ and use that
} else {
// Do not pass a container as the parameter for the volume creation.
// The graph driver would use the container's information (Image) to
// create the parent.
c, err := container.runtime.volumes.Create(nil, nil, "", "", nil)
if err != nil {
return err
}
srcPath, err = volumesDriver.Get(c.ID)
if err != nil {
return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err)
}
srcRW = true // RW by default
}
container.Volumes[volPath] = srcPath
container.VolumesRW[volPath] = srcRW
// Create the mountpoint
volPath = path.Join(container.RootfsPath(), volPath)
rootVolPath, err := utils.FollowSymlinkInScope(volPath, container.RootfsPath())
if err != nil {
panic(err)
}
if _, err := os.Stat(rootVolPath); err != nil {
if os.IsNotExist(err) {
if volIsDir {
if err := os.MkdirAll(rootVolPath, 0755); err != nil {
return err
}
} else {
if err := os.MkdirAll(path.Dir(rootVolPath), 0755); err != nil {
return err
}
if f, err := os.OpenFile(rootVolPath, os.O_CREATE, 0755); err != nil {
return err
} else {
f.Close()
}
}
}
}
// Do not copy or change permissions if we are mounting from the host
if srcRW && !isBindMount {
volList, err := ioutil.ReadDir(rootVolPath)
if err != nil {
return err
}
if len(volList) > 0 {
srcList, err := ioutil.ReadDir(srcPath)
if err != nil {
return err
}
if len(srcList) == 0 {
// If the source volume is empty copy files from the root into the volume
if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil {
return err
}
var stat syscall.Stat_t
if err := syscall.Stat(rootVolPath, &stat); err != nil {
return err
}
var srcStat syscall.Stat_t
if err := syscall.Stat(srcPath, &srcStat); err != nil {
return err
}
// Change the source volume's ownership if it differs from the root
// files that were just copied
if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid {
if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil {
return err
}
}
}
}
}
}
return nil
}
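The FollowSymlinkInScope call is the safety-relevant change here: a symlink inside the rootfs is resolved, but its target is re-rooted so it can never escape the container. A sketch of the assumed semantics (not the helper's implementation):

// given: /rootfs/data/link is a symlink pointing at /etc
p, err := utils.FollowSymlinkInScope("/rootfs/data/link", "/rootfs")
// p == "/rootfs/etc", so the volume mountpoint is created inside the
// rootfs rather than on the host's real /etc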
func (container *Container) applyExternalVolumes() error {
if container.Config.VolumesFrom != "" {
containerSpecs := strings.Split(container.Config.VolumesFrom, ",")
for _, containerSpec := range containerSpecs {
mountRW := true
specParts := strings.SplitN(containerSpec, ":", 2)
switch len(specParts) {
case 0:
return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom)
case 2:
switch specParts[1] {
case "ro":
mountRW = false
case "rw": // mountRW is already true
default:
return fmt.Errorf("Malformed volumes-from specification: %s", containerSpec)
}
}
c := container.runtime.Get(specParts[0])
if c == nil {
return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID)
}
for volPath, id := range c.Volumes {
if _, exists := container.Volumes[volPath]; exists {
continue
}
if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
return err
}
container.Volumes[volPath] = id
if isRW, exists := c.VolumesRW[volPath]; exists {
container.VolumesRW[volPath] = isRW && mountRW
}
}
}
}
return nil
}
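For reference, the volumes-from spec accepted above is a comma-separated list of container[:mode] entries:

// Config.VolumesFrom = "web:ro,db"
//   web's volumes are mounted read-only (mountRW = false)
//   db's volumes keep their original RW flag (mountRW stays true)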
func (container *Container) Run() error {
if err := container.Start(); err != nil {
return err
@@ -1163,11 +1012,12 @@ func (container *Container) allocateNetwork() error {
return nil
}
var iface *NetworkInterface
var err error
if container.State.Ghost {
manager := container.runtime.networkManager
if manager.disabled {
var (
iface *NetworkInterface
err error
)
if container.State.IsGhost() {
if manager := container.runtime.networkManager; manager.disabled {
iface = &NetworkInterface{disabled: true}
} else {
iface = &NetworkInterface{
@@ -1203,10 +1053,12 @@ func (container *Container) allocateNetwork() error {
}
}
portSpecs := make(map[Port]struct{})
bindings := make(map[Port][]PortBinding)
var (
portSpecs = make(map[Port]struct{})
bindings = make(map[Port][]PortBinding)
)
if !container.State.Ghost {
if !container.State.IsGhost() {
if container.Config.ExposedPorts != nil {
portSpecs = container.Config.ExposedPorts
}
@@ -1315,7 +1167,7 @@ func (container *Container) monitor() {
}
// Report status back
container.State.setStopped(exitCode)
container.State.SetStopped(exitCode)
// Release the lock
close(container.waitLock)
@@ -1365,10 +1217,10 @@ func (container *Container) cleanup() {
}
func (container *Container) kill(sig int) error {
container.State.Lock()
defer container.State.Unlock()
container.Lock()
defer container.Unlock()
if !container.State.Running {
if !container.State.IsRunning() {
return nil
}
@@ -1381,7 +1233,7 @@ func (container *Container) kill(sig int) error {
}
func (container *Container) Kill() error {
if !container.State.Running {
if !container.State.IsRunning() {
return nil
}
@@ -1406,7 +1258,7 @@ func (container *Container) Kill() error {
}
func (container *Container) Stop(seconds int) error {
if !container.State.Running {
if !container.State.IsRunning() {
return nil
}
@@ -1440,7 +1292,7 @@ func (container *Container) Restart(seconds int) error {
// Wait blocks until the container stops running, then returns its exit code.
func (container *Container) Wait() int {
<-container.waitLock
return container.State.ExitCode
return container.State.GetExitCode()
}
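This hunk belongs to a wider migration from touching State fields under an explicit State.Lock to mutex-guarded accessors (IsRunning, SetRunning, SetStopped, GetExitCode). A minimal sketch of the pattern, assuming State embeds a sync.RWMutex:

type State struct {
	sync.RWMutex
	Running  bool
	ExitCode int
}

func (s *State) IsRunning() bool {
	s.RLock()
	defer s.RUnlock()
	return s.Running
}

func (s *State) SetStopped(exitCode int) {
	s.Lock()
	defer s.Unlock()
	s.Running = false
	s.ExitCode = exitCode
}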
func (container *Container) Resize(h, w int) error {
@@ -1575,14 +1427,10 @@ func (container *Container) GetSize() (int64, int64) {
}
}
_, err = os.Stat(container.RootfsPath())
if err == nil {
filepath.Walk(container.RootfsPath(), func(path string, fileInfo os.FileInfo, err error) error {
if fileInfo != nil {
sizeRootfs += fileInfo.Size()
}
return nil
})
if _, err = os.Stat(container.RootfsPath()); err == nil {
if sizeRootfs, err = utils.TreeSize(container.RootfsPath()); err != nil {
sizeRootfs = -1
}
}
return sizeRw, sizeRootfs
}
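The hand-rolled filepath.Walk is replaced by utils.TreeSize; a plausible sketch of such a helper (an assumption; the real one may also deduplicate hardlinked files):

func TreeSize(dir string) (size int64, err error) {
	err = filepath.Walk(dir, func(_ string, fi os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if fi != nil && fi.Mode().IsRegular() {
			size += fi.Size()
		}
		return nil
	})
	return size, err
}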
@@ -1617,3 +1465,13 @@ func (container *Container) Exposes(p Port) bool {
_, exists := container.Config.ExposedPorts[p]
return exists
}
func (container *Container) GetPtyMaster() (*os.File, error) {
if container.ptyMaster == nil {
return nil, ErrNoTTY
}
if pty, ok := container.ptyMaster.(*os.File); ok {
return pty, nil
}
return nil, ErrNotATTY
}

View File

@@ -1,11 +1,9 @@
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.io
Requires=network.target
After=multi-user.target
After=network.target
[Service]
Type=simple
ExecStartPre=/bin/mount --make-rprivate /
ExecStart=/usr/bin/docker -d

View File

@@ -1,15 +0,0 @@
#!/bin/bash
# Create a CentOS base image for Docker
# From unclejack https://github.com/dotcloud/docker/issues/290
set -e
MIRROR_URL="http://centos.netnitco.net/6.4/os/x86_64/"
MIRROR_URL_UPDATES="http://centos.netnitco.net/6.4/updates/x86_64/"
yum install -y febootstrap xz
febootstrap -i bash -i coreutils -i tar -i bzip2 -i gzip -i vim-minimal -i wget -i patch -i diffutils -i iproute -i yum centos centos64 $MIRROR_URL -u $MIRROR_URL_UPDATES
touch centos64/etc/resolv.conf
touch centos64/sbin/init
tar --numeric-owner -Jcpf centos-64.tar.xz -C centos64 .

View File

@@ -142,14 +142,22 @@ if [ -z "$strictDebootstrap" ]; then
# this forces dpkg not to call sync() after package extraction and speeds up install
# the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization
echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null
# we want to effectively run "apt-get clean" after every install to keep images small
echo 'DPkg::Post-Invoke {"/bin/rm -f /var/cache/apt/archives/*.deb || true";};' | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null
# we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context)
{
aptGetClean='rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true'
echo 'DPkg::Post-Invoke { "'$aptGetClean'"; };'
echo 'APT::Update::Post-Invoke { "'$aptGetClean'"; };'
echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";'
} | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null
# and remove the translations, too
echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null
# helpful undo lines for each of the above tweaks (for lack of a better home to keep track of them):
# rm /usr/sbin/policy-rc.d
# rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl
# rm /etc/dpkg/dpkg.cfg.d/02apt-speedup
# rm /etc/apt/apt.conf.d/no-cache
# rm /etc/apt/apt.conf.d/no-languages
if [ -z "$skipDetection" ]; then
# see also rudimentary platform detection in hack/install.sh

112
contrib/mkimage-rinse.sh Executable file
View File

@@ -0,0 +1,112 @@
#!/bin/bash
set -e
repo="$1"
distro="$2"
mirror="$3"
if [ ! "$repo" ] || [ ! "$distro" ]; then
self="$(basename $0)"
echo >&2 "usage: $self repo distro [mirror]"
echo >&2
echo >&2 " ie: $self username/centos centos-5"
echo >&2 " $self username/centos centos-6"
echo >&2
echo >&2 " ie: $self username/slc slc-5"
echo >&2 " $self username/slc slc-6"
echo >&2
echo >&2 " ie: $self username/centos centos-5 http://vault.centos.org/5.8/os/x86_64/CentOS/"
echo >&2 " $self username/centos centos-6 http://vault.centos.org/6.3/os/x86_64/Packages/"
echo >&2
echo >&2 'See /etc/rinse for supported values of "distro" and for examples of'
echo >&2 ' expected values of "mirror".'
echo >&2
echo >&2 'This script is tested to work with the original upstream version of rinse,'
echo >&2 ' found at http://www.steve.org.uk/Software/rinse/ and also in Debian at'
echo >&2 ' http://packages.debian.org/wheezy/rinse -- as always, YMMV.'
echo >&2
exit 1
fi
target="/tmp/docker-rootfs-rinse-$distro-$$-$RANDOM"
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
returnTo="$(pwd -P)"
rinseArgs=( --arch amd64 --distribution "$distro" --directory "$target" )
if [ "$mirror" ]; then
rinseArgs+=( --mirror "$mirror" )
fi
set -x
mkdir -p "$target"
sudo rinse "${rinseArgs[@]}"
cd "$target"
# rinse fails a little at setting up /dev, so we'll just wipe it out and create our own
sudo rm -rf dev
sudo mkdir -m 755 dev
(
cd dev
sudo ln -sf /proc/self/fd ./
sudo mkdir -m 755 pts
sudo mkdir -m 1777 shm
sudo mknod -m 600 console c 5 1
sudo mknod -m 600 initctl p
sudo mknod -m 666 full c 1 7
sudo mknod -m 666 null c 1 3
sudo mknod -m 666 ptmx c 5 2
sudo mknod -m 666 random c 1 8
sudo mknod -m 666 tty c 5 0
sudo mknod -m 666 tty0 c 4 0
sudo mknod -m 666 urandom c 1 9
sudo mknod -m 666 zero c 1 5
)
# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target"
# locales
sudo rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive}
# docs
sudo rm -rf usr/share/{man,doc,info,gnome/help}
# cracklib
sudo rm -rf usr/share/cracklib
# i18n
sudo rm -rf usr/share/i18n
# yum cache
sudo rm -rf var/cache/yum
sudo mkdir -p --mode=0755 var/cache/yum
# sln
sudo rm -rf sbin/sln
# ldconfig
#sudo rm -rf sbin/ldconfig
sudo rm -rf etc/ld.so.cache var/cache/ldconfig
sudo mkdir -p --mode=0755 var/cache/ldconfig
# allow networking init scripts inside the container to work without extra steps
echo 'NETWORKING=yes' | sudo tee etc/sysconfig/network > /dev/null
# to restore locales later:
# yum reinstall glibc-common
version=
if [ -r etc/redhat-release ]; then
version="$(sed -E 's/^[^0-9.]*([0-9.]+).*$/\1/' etc/redhat-release)"
elif [ -r etc/SuSE-release ]; then
version="$(awk '/^VERSION/ { print $3 }' etc/SuSE-release)"
fi
if [ -z "$version" ]; then
echo >&2 "warning: cannot autodetect OS version, using $distro as tag"
sleep 20
version="$distro"
fi
sudo tar --numeric-owner -c . | docker import - $repo:$version
docker run -i -t $repo:$version echo success
cd "$returnTo"
sudo rm -rf "$target"

77
contrib/mkseccomp.pl Executable file
View File

@@ -0,0 +1,77 @@
#!/usr/bin/perl
#
# A simple helper script to help people build seccomp profiles for
# Docker/LXC. The goal is mostly to reduce the attack surface to the
# kernel by restricting access to rarely used, recently added, or unused
# syscalls.
#
# This script processes one or more files which contain the list of system
# calls to be allowed. See mkseccomp.sample for more information how you
# can configure the list of syscalls. When run, this script produces output
# which, when stored in a file, can be passed to docker as follows:
#
# docker run -lxc-conf="lxc.seccomp=$file" <rest of arguments>
#
# The included sample file shows how to cut about a quarter of all syscalls
# without affecting most applications.
#
# For specific situations it is possible to reduce the list further. By
# reducing the list to just those syscalls required by a certain application
# you can make it difficult for unknown/unexpected code to run.
#
# Run this script as follows:
#
# ./mkseccomp.pl < mkseccomp.sample >syscalls.list
# or
# ./mkseccomp.pl mkseccomp.sample >syscalls.list
#
# Multiple files can be specified, in which case the lists of syscalls are
# combined.
#
# By Martijn van Oosterhout <kleptog@svana.org> Nov 2013
# How it works:
#
# This program basically spawns two processes to form a chain like:
#
# <process data section to prefix __NR_> | cpp | <add header and filter unknown syscalls>
use strict;
use warnings;
if( -t ) {
print STDERR "Helper script to make seccomp filters for Docker/LXC.\n";
print STDERR "Usage: mkseccomp.pl [files...]\n";
exit 1;
}
my $pid = open(my $in, "-|") // die "Couldn't fork1 ($!)\n";
if($pid == 0) { # Child
$pid = open(my $out, "|-") // die "Couldn't fork2 ($!)\n";
if($pid == 0) { # Child, which execs cpp
exec "cpp" or die "Couldn't exec cpp ($!)\n";
exit 1;
}
# Process the DATA section and output to cpp
print $out "#include <sys/syscall.h>\n";
while(<>) {
if(/^\w/) {
print $out "__NR_$_";
}
}
close $out;
exit 0;
}
# Print header and then process output from cpp.
print "1\n";
print "whitelist\n";
while(<$in>) {
print if( /^[0-9]/ );
}

444
contrib/mkseccomp.sample Normal file
View File

@@ -0,0 +1,444 @@
/* This sample file is an example for mkseccomp.pl to produce a seccomp file
* which restricts syscalls that are only useful for an admin but allows the
* vast majority of normal userspace programs to run normally.
*
* The format of this file is one line per syscall. This is then processed
* and passed to 'cpp' to convert the names to numbers using whatever is
* correct for your platform. As such C-style comments are permitted. Note
* this also means that C preprocessor macros are also allowed. So it is
* possible to create groups surrounded by #ifdef/#endif and control their
* inclusion via #define (not #include).
*
* Syscalls that don't exist on your architecture are silently filtered out.
* Syscalls marked with (*) are required for a container to spawn a bash
* shell successfully (not necessarily full featured). Listing the same
* syscall multiple times is no problem.
*
* If you want to make a list specifically for one application the easiest
* way is to run the application under strace, like so:
*
* $ strace -f -q -c -o strace.out application args...
*
* Once you have a reasonable sample of the execution of the program, exit
* it. The file strace.out will have a summary of the syscalls used. Copy
* that list into this file, comment out everything else except the starred
* syscalls (which you need for the container to start) and you're done.
*
* To get the list of syscalls from the strace output, this works well
* for me:
*
* $ cut -c52- < strace.out
*
* This sample list was compiled as a combination of all the syscalls
* available on i386 and amd64 on Ubuntu Precise, as such it may not contain
everything and not everything may be relevant for your system. This
* shouldn't be a problem.
*/
// Filesystem/File descriptor related
access // (*)
chdir // (*)
chmod
chown
chown32
close // (*)
creat
dup // (*)
dup2 // (*)
dup3
epoll_create
epoll_create1
epoll_ctl
epoll_ctl_old
epoll_pwait
epoll_wait
epoll_wait_old
eventfd
eventfd2
faccessat // (*)
fadvise64
fadvise64_64
fallocate
fanotify_init
fanotify_mark
ioctl // (*)
fchdir
fchmod
fchmodat
fchown
fchown32
fchownat
fcntl // (*)
fcntl64
fdatasync
fgetxattr
flistxattr
flock
fremovexattr
fsetxattr
fstat // (*)
fstat64
fstatat64
fstatfs
fstatfs64
fsync
ftruncate
ftruncate64
getcwd // (*)
getdents // (*)
getdents64
getxattr
inotify_add_watch
inotify_init
inotify_init1
inotify_rm_watch
io_cancel
io_destroy
io_getevents
io_setup
io_submit
lchown
lchown32
lgetxattr
link
linkat
listxattr
llistxattr
llseek
_llseek
lremovexattr
lseek // (*)
lsetxattr
lstat
lstat64
mkdir
mkdirat
mknod
mknodat
newfstatat
_newselect
oldfstat
oldlstat
oldolduname
oldstat
olduname
oldwait4
open // (*)
openat // (*)
pipe // (*)
pipe2
poll
ppoll
pread64
preadv
futimesat
pselect6
pwrite64
pwritev
read // (*)
readahead
readdir
readlink
readlinkat
readv
removexattr
rename
renameat
rmdir
select
sendfile
sendfile64
setxattr
splice
stat // (*)
stat64
statfs // (*)
statfs64
symlink
symlinkat
sync
sync_file_range
sync_file_range2
syncfs
tee
truncate
truncate64
umask
unlink
unlinkat
ustat
utime
utimensat
utimes
write // (*)
writev
// Network related
accept
accept4
bind // (*)
connect // (*)
getpeername
getsockname // (*)
getsockopt
listen
recv
recvfrom // (*)
recvmmsg
recvmsg
send
sendmmsg
sendmsg
sendto // (*)
setsockopt
shutdown
socket // (*)
socketcall
socketpair
// Signal related
pause
rt_sigaction // (*)
rt_sigpending
rt_sigprocmask // (*)
rt_sigqueueinfo
rt_sigreturn // (*)
rt_sigsuspend
rt_sigtimedwait
rt_tgsigqueueinfo
sigaction
sigaltstack // (*)
signal
signalfd
signalfd4
sigpending
sigprocmask
sigreturn
sigsuspend
// Other needed POSIX
alarm
brk // (*)
clock_adjtime
clock_getres
clock_gettime
clock_nanosleep
//clock_settime
gettimeofday
nanosleep
nice
sysinfo
syslog
time
timer_create
timer_delete
timerfd_create
timerfd_gettime
timerfd_settime
timer_getoverrun
timer_gettime
timer_settime
times
uname // (*)
// Memory control
madvise
mbind
mincore
mlock
mlockall
mmap // (*)
mmap2
mprotect // (*)
mremap
msync
munlock
munlockall
munmap // (*)
remap_file_pages
set_mempolicy
vmsplice
// Process control
capget
//capset
clone // (*)
execve // (*)
exit // (*)
exit_group // (*)
fork
getcpu
getpgid
getpgrp // (*)
getpid // (*)
getppid // (*)
getpriority
getresgid
getresgid32
getresuid
getresuid32
getrlimit // (*)
getrusage
getsid
getuid // (*)
getuid32
getegid // (*)
getegid32
geteuid // (*)
geteuid32
getgid // (*)
getgid32
getgroups
getgroups32
getitimer
get_mempolicy
kill
//personality
prctl
prlimit64
sched_getaffinity
sched_getparam
sched_get_priority_max
sched_get_priority_min
sched_getscheduler
sched_rr_get_interval
//sched_setaffinity
//sched_setparam
//sched_setscheduler
sched_yield
setfsgid
setfsgid32
setfsuid
setfsuid32
setgid
setgid32
setgroups
setgroups32
setitimer
setpgid // (*)
setpriority
setregid
setregid32
setresgid
setresgid32
setresuid
setresuid32
setreuid
setreuid32
setrlimit
setsid
setuid
setuid32
ugetrlimit
vfork
wait4 // (*)
waitid
waitpid
// IPC
ipc
mq_getsetattr
mq_notify
mq_open
mq_timedreceive
mq_timedsend
mq_unlink
msgctl
msgget
msgrcv
msgsnd
semctl
semget
semop
semtimedop
shmat
shmctl
shmdt
shmget
// Linux specific, mostly needed for thread-related stuff
arch_prctl // (*)
get_robust_list
get_thread_area
gettid
futex // (*)
restart_syscall // (*)
set_robust_list // (*)
set_thread_area
set_tid_address // (*)
tgkill
tkill
// Admin syscalls, these are blocked
//acct
//adjtimex
//bdflush
//chroot
//create_module
//delete_module
//get_kernel_syms // Obsolete
//idle // Obsolete
//init_module
//ioperm
//iopl
//ioprio_get
//ioprio_set
//kexec_load
//lookup_dcookie // oprofile only?
//migrate_pages // NUMA
//modify_ldt
//mount
//move_pages // NUMA
//name_to_handle_at // NFS server
//nfsservctl // NFS server
//open_by_handle_at // NFS server
//perf_event_open
//pivot_root
//process_vm_readv // For debugger
//process_vm_writev // For debugger
//ptrace // For debugger
//query_module
//quotactl
//reboot
//setdomainname
//sethostname
//setns
//settimeofday
//sgetmask // Obsolete
//ssetmask // Obsolete
//stime
//swapoff
//swapon
//_sysctl
//sysfs
//sys_setaltroot
//umount
//umount2
//unshare
//uselib
//vhangup
//vm86
//vm86old
// Kernel key management
//add_key
//keyctl
//request_key
// Unimplemented
//afs_syscall
//break
//ftime
//getpmsg
//gtty
//lock
//madvise1
//mpx
//prof
//profil
//putpmsg
//security
//stty
//tuxcall
//ulimit
//vserver


@@ -0,0 +1,3 @@
# hide docker's loopback devices from udisks, and thus from user desktops
SUBSYSTEM=="block", ENV{DM_NAME}=="docker-*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
SUBSYSTEM=="block", DEVPATH=="/devices/virtual/block/loop*", ATTR{loop/backing_file}=="/var/lib/docker/*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"


@@ -17,3 +17,34 @@ meaning you can use Vagrant to control Docker containers.
* [docker-provider](https://github.com/fgrehm/docker-provider)
* [vagrant-shell](https://github.com/destructuring/vagrant-shell)
## Setting up Vagrant-docker with the Remote API
The initial Docker upstart script will not work because it only listens on `127.0.0.1`, which is not accessible to the host machine. Instead, we need to change the script to bind to `0.0.0.0`. To do this, modify `/etc/init/docker.conf` to look like this:
```
description "Docker daemon"
start on filesystem and started lxc-net
stop on runlevel [!2345]
respawn
script
/usr/bin/docker -d -H=tcp://0.0.0.0:4243/
end script
```
Once that's done, you need to set up an SSH tunnel between your host machine and the Vagrant machine that's running Docker. This can be done by running the following command in a host terminal:
```
ssh -L 4243:localhost:4243 -p 2222 vagrant@localhost
```
(The first 4243 is what your host can connect to, the second 4243 is what port Docker is running on in the vagrant machine, and the 2222 is the port Vagrant is providing for SSH. If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.)
Note that because the port has been changed, to run docker commands from within the command line you must run them like this:
```
sudo docker -H 0.0.0.0:4243 < commands for docker >
```
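With the tunnel in place you can verify the connection from the host, for example (illustrative):
```
sudo docker -H tcp://localhost:4243 info
```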
```

1
contrib/zfs/MAINTAINERS Normal file

@@ -0,0 +1 @@
Gurjeet Singh <gurjeet@singh.im> (gurjeet.singh.im)

23
contrib/zfs/README.md Normal file

@@ -0,0 +1,23 @@
# ZFS Storage Driver
This is a placeholder to declare the presence and status of the ZFS storage
driver for containers.
The current development is done in Gurjeet Singh's fork of Docker, under the
branch named [zfs_driver].
[zfs_driver]: https://github.com/gurjeet/docker/tree/zfs_driver
# Status
Alpha: The code is now capable of creating, running and destroying containers
and images.
The code is under development. Contributions in the form of suggestions,
code-reviews, and patches are welcome.
Please send the communication to gurjeet@singh.im and CC at least one Docker
mailing list.


@@ -23,22 +23,26 @@ func main() {
sysinit.SysInit()
return
}
// FIXME: Switch d and D ? (to be more sshd like)
flVersion := flag.Bool("v", false, "Print version information and quit")
flDaemon := flag.Bool("d", false, "Daemon mode")
flDebug := flag.Bool("D", false, "Debug mode")
flAutoRestart := flag.Bool("r", true, "Restart previously running containers")
bridgeName := flag.String("b", "", "Attach containers to a pre-existing network bridge. Use 'none' to disable container networking")
pidfile := flag.String("p", "/var/run/docker.pid", "File containing process PID")
flRoot := flag.String("g", "/var/lib/docker", "Path to use as the root of the docker runtime.")
flEnableCors := flag.Bool("api-enable-cors", false, "Enable CORS requests in the remote api.")
flDns := flag.String("dns", "", "Set custom dns servers")
flHosts := utils.ListOpts{fmt.Sprintf("unix://%s", docker.DEFAULTUNIXSOCKET)}
flag.Var(&flHosts, "H", "tcp://host:port to bind/connect to or unix://path/to/socket to use")
flEnableIptables := flag.Bool("iptables", true, "Disable iptables within docker")
flDefaultIp := flag.String("ip", "0.0.0.0", "Default ip address to use when binding a containers ports")
flInterContainerComm := flag.Bool("icc", true, "Enable inter-container communication")
flGraphDriver := flag.String("graph-driver", "", "For docker to use a specific graph driver")
var (
flVersion = flag.Bool("v", false, "Print version information and quit")
flDaemon = flag.Bool("d", false, "Enable daemon mode")
flDebug = flag.Bool("D", false, "Enable debug mode")
flAutoRestart = flag.Bool("r", true, "Restart previously running containers")
bridgeName = flag.String("b", "", "Attach containers to a pre-existing network bridge; use 'none' to disable container networking")
bridgeIp = flag.String("bip", "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b")
pidfile = flag.String("p", "/var/run/docker.pid", "Path to use for daemon PID file")
flRoot = flag.String("g", "/var/lib/docker", "Path to use as the root of the docker runtime")
flEnableCors = flag.Bool("api-enable-cors", false, "Enable CORS headers in the remote API")
flDns = docker.NewListOpts(docker.ValidateIp4Address)
flEnableIptables = flag.Bool("iptables", true, "Disable docker's addition of iptables rules")
flDefaultIp = flag.String("ip", "0.0.0.0", "Default IP address to use when binding container ports")
flInterContainerComm = flag.Bool("icc", true, "Enable inter-container communication")
flGraphDriver = flag.String("s", "", "Force the docker runtime to use a specific storage driver")
flHosts = docker.NewListOpts(docker.ValidateHost)
)
flag.Var(&flDns, "dns", "Force docker to use specific DNS servers")
flag.Var(&flHosts, "H", "Multiple tcp://host:port or unix://path/to/socket to bind in daemon mode, single connection otherwise")
flag.Parse()
@@ -46,16 +50,13 @@ func main() {
showVersion()
return
}
if len(flHosts) > 1 {
flHosts = flHosts[1:] //trick to display a nice default value in the usage
if flHosts.Len() == 0 {
// If we do not have a host, default to unix socket
flHosts.Set(fmt.Sprintf("unix://%s", docker.DEFAULTUNIXSOCKET))
}
for i, flHost := range flHosts {
host, err := utils.ParseHost(docker.DEFAULTHTTPHOST, docker.DEFAULTHTTPPORT, flHost)
if err == nil {
flHosts[i] = host
} else {
log.Fatal(err)
}
if *bridgeName != "" && *bridgeIp != "" {
log.Fatal("You specified -b & -bip, mutually exclusive options. Please specify only one.")
}
if *flDebug {
@@ -78,9 +79,10 @@ func main() {
job.Setenv("Root", *flRoot)
job.SetenvBool("AutoRestart", *flAutoRestart)
job.SetenvBool("EnableCors", *flEnableCors)
job.Setenv("Dns", *flDns)
job.SetenvList("Dns", flDns.GetAll())
job.SetenvBool("EnableIptables", *flEnableIptables)
job.Setenv("BridgeIface", *bridgeName)
job.Setenv("BridgeIp", *bridgeIp)
job.Setenv("DefaultIp", *flDefaultIp)
job.SetenvBool("InterContainerCommunication", *flInterContainerComm)
job.Setenv("GraphDriver", *flGraphDriver)
@@ -88,19 +90,22 @@ func main() {
log.Fatal(err)
}
// Serve api
job = eng.Job("serveapi", flHosts...)
job = eng.Job("serveapi", flHosts.GetAll()...)
job.SetenvBool("Logging", true)
if err := job.Run(); err != nil {
log.Fatal(err)
}
} else {
if len(flHosts) > 1 {
if flHosts.Len() > 1 {
log.Fatal("Please specify only one -H")
}
protoAddrParts := strings.SplitN(flHosts[0], "://", 2)
protoAddrParts := strings.SplitN(flHosts.GetAll()[0], "://", 2)
if err := docker.ParseCommands(protoAddrParts[0], protoAddrParts[1], flag.Args()...); err != nil {
if sterr, ok := err.(*utils.StatusError); ok {
os.Exit(sterr.Status)
if sterr.Status != "" {
log.Println(sterr.Status)
}
os.Exit(sterr.StatusCode)
}
log.Fatal(err)
}
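
As a usage sketch of the new daemon flags introduced above (addresses illustrative):

    # let docker create the docker0 bridge with the given address/netmask
    sudo docker -d -bip 10.0.42.1/16

    # -b and -bip are mutually exclusive and refused together
    sudo docker -d -b br0 -bip 10.0.42.1/16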


@@ -9,7 +9,7 @@ run apt-get install -y python-setuptools make
run easy_install pip
#from docs/requirements.txt, but here to increase cacheability
run pip install Sphinx==1.1.3
run pip install sphinxcontrib-httpdomain==1.1.8
run pip install sphinxcontrib-httpdomain==1.1.9
add . /docs
run cd /docs; make docs


@@ -1,2 +1,4 @@
Andy Rothfusz <andy@dotcloud.com> (@metalivedev)
Ken Cochrane <ken@dotcloud.com> (@kencochrane)
James Turnbull <james@lovedthanlost.net> (@jamtur01)
Sven Dowideit <SvenDowideit@fosiki.com> (@SvenDowideit)


@@ -41,24 +41,25 @@ its dependencies. There are two main ways to install this tool:
###Native Installation
* Install sphinx: `pip install sphinx`
* Mac OS X: `[sudo] pip-2.7 install sphinx`
* Install sphinx httpdomain contrib package: `pip install sphinxcontrib-httpdomain`
* Mac OS X: `[sudo] pip-2.7 install sphinxcontrib-httpdomain`
* If pip is not available you can probably install it using your favorite package manager as **python-pip**
Install dependencies from `requirements.txt` file in your `docker/docs`
directory:
* Linux: `pip install -r docs/requirements.txt`
* Mac OS X: `[sudo] pip-2.7 install -r docs/requirements.txt`
###Alternative Installation: Docker Container
If you're running ``docker`` on your development machine then you may
find it easier and cleaner to use the Dockerfile. This installs Sphinx
find it easier and cleaner to use the docs Dockerfile. This installs Sphinx
in a container, adds the local ``docs/`` directory and builds the HTML
docs inside the container, even starting a simple HTTP server on port
8000 so that you can connect and see your changes. Just run ``docker
build .`` and run the resulting image. This is the equivalent to
``make clean server`` since each container starts clean.
8000 so that you can connect and see your changes.
In the ``docs/`` directory, run:
```docker build -t docker:docs . && docker run -p 8000:8000 docker:docs```
In the ``docker`` source directory, run:
```make docs```
This is the equivalent to ``make clean server`` since each container starts clean.
Usage
-----
@@ -127,7 +128,8 @@ Guides on using sphinx
* Code examples
* Start without $, so it's easy to copy and paste.
* Start typed commands with ``$ `` (dollar space) so that they
are easily differentiated from program output.
* Use "sudo" with docker to ensure that your command is runnable
even if they haven't [used the *docker*
group](http://docs.docker.io/en/latest/use/basics/#why-sudo).
@@ -136,7 +138,7 @@ Manpages
--------
* To make the manpages, run ``make man``. Please note there is a bug
in spinx 1.1.3 which makes this fail. Upgrade to the latest version
in Sphinx 1.1.3 which makes this fail. Upgrade to the latest version
of Sphinx.
* Then preview the manpage by running ``man _build/man/docker.1``,
where ``_build/man/docker.1`` is the path to the generated manfile


@@ -34,6 +34,35 @@ Calling /images/<name>/insert is the same as calling
You can still call an old version of the api using
/v1.0/images/<name>/insert
v1.8
****
Full Documentation
------------------
:doc:`docker_remote_api_v1.8`
What's new
----------
.. http:post:: /build
**New!** This endpoint now returns build status as a JSON stream. In case
of a build error, it returns the exit status of the failed command.
.. http:get:: /containers/(id)/json
**New!** This endpoint now returns the host config for the container.
.. http:post:: /images/create
.. http:post:: /images/(name)/insert
.. http:post:: /images/(name)/push
**New!** A progressDetail object was added to the JSON. It's now possible
to get the current value and the total of the progress without having to
parse the string.
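
For example, a progress message on these endpoints might now look like this (all values illustrative):

::

    {"status":"Downloading","progressDetail":{"current":1024,"total":20480},"id":"8dbd9e392a96"}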
v1.7
****


@@ -132,7 +132,9 @@ Create a container
],
"Dns":null,
"Image":"base",
"Volumes":{},
"Volumes":{
"/tmp": {}
},
"VolumesFrom":"",
"WorkingDir":""
@@ -361,8 +363,12 @@ Start a container
{
"Binds":["/tmp:/tmp"],
"LxcConf":{"lxc.utsname":"docker"}
"LxcConf":{"lxc.utsname":"docker"},
"PortBindings":null
"PublishAllPorts":false
}
Binds need to reference Volumes that were defined during container creation.
**Example response**:
@@ -990,10 +996,10 @@ Build an image from Dockerfile via stdin
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{{ STREAM }}
The stream must be a tar archive compressed with one of the
following algorithms: identity (no compression), gzip, bzip2,
xz.
@@ -1171,6 +1177,53 @@ Monitor Docker's events
:statuscode 200: no error
:statuscode 500: server error
Get a tarball containing all images and tags in a repository
************************************************************
.. http:get:: /images/(name)/get
Get a tarball containing all images and metadata for the repository specified by ``name``.
**Example request**:
.. sourcecode:: http
GET /images/ubuntu/get
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/x-tar
Binary data stream
:statuscode 200: no error
:statuscode 500: server error
Load a tarball with a set of images and tags into docker
********************************************************
.. http:post:: /images/load
Load a set of images and tags into the docker repository.
**Example request**:
.. sourcecode:: http
POST /images/load
Tarball in body
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
:statuscode 200: no error
:statuscode 500: server error
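
As a sketch of how the two endpoints work together over the daemon's TCP socket (assuming the daemon was started with ``-H tcp://0.0.0.0:4243``; adjust host and port to your setup):

::

    # save the whole "ubuntu" repository to a tarball
    curl -o ubuntu.tar http://localhost:4243/images/ubuntu/get

    # load it back in, e.g. on another host
    curl -X POST --data-binary @ubuntu.tar http://localhost:4243/images/load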
3. Going further
================

File diff suppressed because it is too large


@@ -37,7 +37,7 @@ We expect that there will be only one instance of the index, run and managed by
- It delegates authentication and authorization to the Index Auth service using tokens
- It supports different storage backends (S3, cloud files, local FS)
- It doesn't have a local database
- It will be open-sourced at some point
- `Source Code <https://github.com/dotcloud/docker-registry>`_
We expect that there will be multiple registries out there. To help to grasp the context, here are some examples of registries:
@@ -46,10 +46,6 @@ We expect that there will be multiple registries out there. To help to grasp the
- **vendor registry**: such a registry is provided by a software vendor, who wants to distribute docker images. It would be operated and managed by the vendor. Only users authorized by the vendor would be able to get write access. Some images would be public (accessible for anyone), others private (accessible only for authorized users). Authentication and authorization would be delegated to the Index. The goal of vendor registries is to let someone do “docker pull basho/riak1.3” and automatically push from the vendor registry (instead of a sponsor registry); i.e. get all the convenience of a sponsor registry, while retaining control on the asset distribution.
- **private registry**: such a registry is located behind a firewall, or protected by an additional security layer (HTTP authorization, SSL client-side certificates, IP address authorization...). The registry is operated by a private entity, outside of dotCloud's control. It can optionally delegate additional authorization to the Index, but it is not mandatory.
.. note::
Mirror registries and private registries which do not use the Index don't even need to run the registry code. They can be implemented by any kind of transport implementing HTTP GET and PUT. Read-only registries can be powered by a simple static HTTP server.
.. note::
The latter implies that while HTTP is the protocol of choice for a registry, multiple schemes are possible (and in some cases, trivial):


@@ -1,4 +1,4 @@
:title: Registry API
:title: Remote API Client Libraries
:description: Various client libraries available to use with the Docker remote API
:keywords: API, Docker, index, registry, REST, documentation, clients, Python, Ruby, Javascript, Erlang, Go


@@ -18,6 +18,39 @@ To list available commands, either run ``docker`` with no parameters or execute
...
.. _cli_daemon:
``daemon``
----------
::
Usage of docker:
-D=false: Enable debug mode
-H=[unix:///var/run/docker.sock]: Multiple tcp://host:port or unix://path/to/socket to bind in daemon mode, single connection otherwise
-api-enable-cors=false: Enable CORS headers in the remote API
-b="": Attach containers to a pre-existing network bridge; use 'none' to disable container networking
-bip="": Use the provided CIDR notation address for the dynamically created bridge (docker0); Mutually exclusive of -b
-d=false: Enable daemon mode
-dns="": Force docker to use specific DNS servers
-g="/var/lib/docker": Path to use as the root of the docker runtime
-icc=true: Enable inter-container communication
-ip="0.0.0.0": Default IP address to use when binding container ports
-iptables=true: Disable docker's addition of iptables rules
-p="/var/run/docker.pid": Path to use for daemon PID file
-r=true: Restart previously running containers
-s="": Force the docker runtime to use a specific storage driver
-v=false: Print version information and quit
The docker daemon is the persistent process that manages containers. Docker uses the same binary for both the
daemon and client. To run the daemon you provide the ``-d`` flag.
To force docker to use devicemapper as the storage driver, use ``docker -d -s devicemapper``
To set the dns server for all docker containers, use ``docker -d -dns 8.8.8.8``
To run the daemon with debug output, use ``docker -d -D``
.. _cli_attach:
``attach``
@@ -34,7 +67,8 @@ To list available commands, either run ``docker`` with no parameters or execute
You can detach from the container again (and leave it running) with
``CTRL-c`` (for a quiet exit) or ``CTRL-\`` to get a stacktrace of
the Docker client when it quits.
the Docker client when it quits. When you detach from the container's
process the exit code will be returned to the client.
To stop a container, use ``docker stop``
@@ -88,35 +122,69 @@ Examples:
Usage: docker build [OPTIONS] PATH | URL | -
Build a new container image from the source code at PATH
-t="": Repository name (and optionally a tag) to be applied to the resulting image in case of success.
-t="": Repository name (and optionally a tag) to be applied
to the resulting image in case of success.
-q=false: Suppress verbose build output.
-no-cache: Do not use the cache when building the image.
-rm: Remove intermediate containers after a successful build
When a single Dockerfile is given as URL, then no context is set. When a git repository is set as URL, the repository is used as context
The files at PATH or URL are called the "context" of the build. The
build process may refer to any of the files in the context, for
example when using an :ref:`ADD <dockerfile_add>` instruction. When a
single ``Dockerfile`` is given as URL, then no context is set. When a
git repository is set as URL, then the repository is used as the
context.
.. _cli_build_examples:
.. seealso:: :ref:`dockerbuilder`.
Examples:
~~~~~~~~~
.. code-block:: bash
sudo docker build .
$ sudo docker build .
Uploading context 10240 bytes
Step 1 : FROM busybox
Pulling repository busybox
---> e9aa60c60128MB/2.284 MB (100%) endpoint: https://cdn-registry-1.docker.io/v1/
Step 2 : RUN ls -lh /
---> Running in 9c9e81692ae9
total 24
drwxr-xr-x 2 root root 4.0K Mar 12 2013 bin
drwxr-xr-x 5 root root 4.0K Oct 19 00:19 dev
drwxr-xr-x 2 root root 4.0K Oct 19 00:19 etc
drwxr-xr-x 2 root root 4.0K Nov 15 23:34 lib
lrwxrwxrwx 1 root root 3 Mar 12 2013 lib64 -> lib
dr-xr-xr-x 116 root root 0 Nov 15 23:34 proc
lrwxrwxrwx 1 root root 3 Mar 12 2013 sbin -> bin
dr-xr-xr-x 13 root root 0 Nov 15 23:34 sys
drwxr-xr-x 2 root root 4.0K Mar 12 2013 tmp
drwxr-xr-x 2 root root 4.0K Nov 15 23:34 usr
---> b35f4035db3f
Step 3 : CMD echo Hello World
---> Running in 02071fceb21b
---> f52f38b7823e
Successfully built f52f38b7823e
This will read the ``Dockerfile`` from the current directory. It will
also send any other files and directories found in the current
directory to the ``docker`` daemon.
This example specifies that the PATH is ``.``, and so all the files in
the local directory get tar'd and sent to the Docker daemon. The PATH
specifies where to find the files for the "context" of the build on
the Docker daemon. Remember that the daemon could be running on a
remote machine and that no parsing of the Dockerfile happens at the
client side (where you're running ``docker build``). That means that
*all* the files at PATH get sent, not just the ones listed to
:ref:`ADD <dockerfile_add>` in the ``Dockerfile``.
The transfer of context from the local machine to the Docker daemon is
what the ``docker`` client means when you see the "Uploading context"
message.
The contents of this directory would be used by ``ADD`` commands found
within the ``Dockerfile``. This will send a lot of data to the
``docker`` daemon if the current directory contains a lot of data. If
the absolute path is provided instead of ``.`` then only the files and
directories required by the ADD commands from the ``Dockerfile`` will be
added to the context and transferred to the ``docker`` daemon.
.. code-block:: bash
sudo docker build -t vieux/apache:2.0 .
$ sudo docker build -t vieux/apache:2.0 .
This will build like the previous example, but it will then tag the
resulting image. The repository name will be ``vieux/apache`` and the
@@ -125,20 +193,19 @@ tag will be ``2.0``
.. code-block:: bash
sudo docker build - < Dockerfile
$ sudo docker build - < Dockerfile
This will read a ``Dockerfile`` from *stdin* without context. Due to
the lack of a context, no contents of any local directory will be sent
to the ``docker`` daemon. ``ADD`` doesn't work when running in this
mode because the absence of the context provides no source files to
copy to the container.
to the ``docker`` daemon. Since there is no context, a Dockerfile
``ADD`` only works if it refers to a remote URL.
.. code-block:: bash
sudo docker build github.com/creack/docker-firefox
$ sudo docker build github.com/creack/docker-firefox
This will clone the Github repository and use it as context. The
``Dockerfile`` at the root of the repository is used as
This will clone the Github repository and use the cloned repository as
context. The ``Dockerfile`` at the root of the repository is used as
``Dockerfile``. Note that you can specify an arbitrary git repository
by using the ``git://`` schema.
@@ -157,27 +224,52 @@ by using the ``git://`` schema.
-m="": Commit message
-author="": Author (eg. "John Hannibal Smith <hannibal@a-team.com>"
-run="": Configuration to be applied when the image is launched with `docker run`.
(ex: '{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')
(ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')
Simple commit of an existing container
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. _cli_commit_examples:
Commit an existing container
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
$ docker ps
$ sudo docker ps
ID IMAGE COMMAND CREATED STATUS PORTS
c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours
197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours
$ docker commit c3f279d17e0a SvenDowideit/testimage:version3
f5283438590d
$ docker images | head
REPOSITORY TAG ID CREATED SIZE
SvenDowideit/testimage version3 f5283438590d 16 seconds ago 204.2 MB (virtual 335.7 MB)
REPOSITORY TAG ID CREATED VIRTUAL SIZE
SvenDowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB
Change the command that a container runs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sometimes you have an application container running just a service and you need
to make a quick change (run bash?) and then change it back.
In this example, we run a container with ``ls`` and then change the image to
run ``ls /etc``.
.. code-block:: bash
$ docker run -t -name test ubuntu ls
bin boot dev etc home lib lib64 media mnt opt proc root run sbin selinux srv sys tmp usr var
$ docker commit -run='{"Cmd": ["ls","/etc"]}' test test2
933d16de9e70005304c1717b5c6f2f39d6fd50752834c6f34a155c70790011eb
$ docker run -t test2
adduser.conf gshadow login.defs rc0.d
alternatives gshadow- logrotate.d rc1.d
apt host.conf lsb-base rc2.d
...
Full -run example
.................
The ``-run`` JSON hash changes the ``Config`` section when running ``docker inspect CONTAINERID``
or ``config`` when running ``docker inspect IMAGEID``.
(multiline is ok within a single quote ``'``)
::
@@ -219,10 +311,15 @@ Full -run example
::
Usage: docker cp CONTAINER:RESOURCE HOSTPATH
Usage: docker cp CONTAINER:PATH HOSTPATH
Copy files/folders from the container's filesystem to the host
path. Paths are relative to the root of the filesystem.
.. code-block:: bash
$ sudo docker cp 7bb0e258aefe:/etc/debian_version .
$ sudo docker cp blue_frog:/etc/hosts .
.. _cli_diff:
@@ -331,7 +428,13 @@ Show events in the past from a specified time
Usage: docker export CONTAINER
Export the contents of a filesystem as a tar archive
Export the contents of a filesystem as a tar archive to STDOUT
for example:
.. code-block:: bash
$ sudo docker export red_panda > latest.tar
.. _cli_history:
@@ -392,18 +495,52 @@ To see how the docker:latest image was built:
List images
-a=false: show all images
-a=false: show all images (by default filter out the intermediate images used to build)
-notrunc=false: Don't truncate output
-q=false: only show numeric IDs
-tree=false: output graph in tree format
-viz=false: output graph in graphviz format
Listing the most recently created images
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
$ sudo docker images | head
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
<none> <none> 77af4d6b9913 19 hours ago 1.089 GB
committest latest b6fa739cedf5 19 hours ago 1.089 GB
<none> <none> 78a85c484f71 19 hours ago 1.089 GB
docker latest 30557a29d5ab 20 hours ago 1.089 GB
<none> <none> 0124422dd9f9 20 hours ago 1.089 GB
<none> <none> 18ad6fad3402 22 hours ago 1.082 GB
<none> <none> f9f1e26352f0 23 hours ago 1.089 GB
tryout latest 2629d1fa0b81 23 hours ago 131.5 MB
<none> <none> 5ed6274db6ce 24 hours ago 1.089 GB
Listing the full length image IDs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: bash
$ sudo docker images -notrunc | head
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
<none> <none> 77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 1.089 GB
committest latest b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 1.089 GB
<none> <none> 78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921 19 hours ago 1.089 GB
docker latest 30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 1.089 GB
<none> <none> 0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5 20 hours ago 1.089 GB
<none> <none> 18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b 22 hours ago 1.082 GB
<none> <none> f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a 23 hours ago 1.089 GB
tryout latest 2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074 23 hours ago 131.5 MB
<none> <none> 5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df 24 hours ago 1.089 GB
Displaying images visually
~~~~~~~~~~~~~~~~~~~~~~~~~~
::
.. code-block:: bash
sudo docker images -viz | dot -Tpng -o docker.png
$ sudo docker images -viz | dot -Tpng -o docker.png
.. image:: docker_images.gif
:alt: Example inheritance graph of Docker images.
@@ -412,9 +549,9 @@ Displaying images visually
Displaying image hierarchy
~~~~~~~~~~~~~~~~~~~~~~~~~~
::
.. code-block:: bash
sudo docker images -tree
$ sudo docker images -tree
|─8dbd9e392a96 Size: 131.5 MB (virtual 131.5 MB) Tags: ubuntu:12.04,ubuntu:latest,ubuntu:precise
└─27cf78414709 Size: 180.1 MB (virtual 180.1 MB)
@@ -447,7 +584,8 @@ Displaying image hierarchy
Usage: docker import URL|- [REPOSITORY[:TAG]]
Create a new filesystem image from the contents of a tarball
Create an empty filesystem image and import the contents of the tarball
(.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.
At this time, the URL must start with ``http`` and point to a single
file archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) containing a
@@ -519,6 +657,12 @@ might not get preserved.
Insert a file from URL in the IMAGE at PATH
Use the specified IMAGE as the parent for a new image which adds a
:ref:`layer <layer_def>` containing the new file. ``insert`` does not modify
the original image, and the new image has the contents of the parent image,
plus the new file.
Examples
~~~~~~~~
@@ -528,6 +672,7 @@ Insert file from github
.. code-block:: bash
$ sudo docker insert 8283e18b24bc https://raw.github.com/metalivedev/django/master/postinstall /tmp/postinstall.sh
06fd35556d7b
.. _cli_inspect:
@@ -540,6 +685,52 @@ Insert file from github
Return low-level information on a container
-format="": template to output results
By default, this will render all results in a JSON array. If a format
is specified, the given template will be executed for each result.
Go's `text/template <http://golang.org/pkg/text/template/>`_ package
describes all the details of the format.
Examples
~~~~~~~~
Get an instance's IP Address
............................
For the most part, you can pick out any field from the JSON in a
fairly straightforward manner.
.. code-block:: bash
$ sudo docker inspect -format='{{.NetworkSettings.IPAddress}}' $INSTANCE_ID
List All Port Bindings
......................
One can loop over arrays and maps in the results to produce simple
text output:
.. code-block:: bash
$ sudo docker inspect -format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID
Find a Specific Port Mapping
............................
The ``.Field`` syntax doesn't work when the field name begins with a
number, but the template language's ``index`` function does. The
``.NetworkSettings.Ports`` section contains a map of the internal port
mappings to a list of external address/port objects, so to grab just
the numeric public port, you use ``index`` to find the specific port
map, and then ``index`` 0 contains the first object inside of that. Then
we ask for the ``HostPort`` field to get the public address.
.. code-block:: bash
$ sudo docker inspect -format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID
.. _cli_kill:
``kill``
@@ -559,6 +750,18 @@ Known Issues (kill)
* :issue:`197` indicates that ``docker kill`` may leave directories
behind and make it difficult to remove the container.
.. _cli_load:
``load``
--------
::
Usage: docker load < repository.tar
Loads a tarred repository from the standard input stream.
Restores both images and tags.
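
For example, restoring a repository saved with ``docker save`` (illustrative):

.. code-block:: bash

    $ sudo docker load < repository.tar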
.. _cli_login:
``login``
@@ -592,6 +795,15 @@ Known Issues (kill)
Fetch the logs of a container
``docker logs`` is a convenience which batch-retrieves whatever logs
are present at the time of execution. This does not guarantee
execution order when combined with a ``docker run`` (i.e. your run may
not have generated any logs at the time you execute ``docker logs``).
``docker logs -f`` combines ``docker logs`` and ``docker attach``: it
will first return all logs from the beginning and then continue
streaming new output from the container's stdout and stderr.
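
A minimal illustration of the difference (container and output are hypothetical):

.. code-block:: bash

    $ CID=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello; sleep 1; done")
    $ sudo docker logs $CID      # prints everything logged so far, then exits
    $ sudo docker logs -f $CID   # prints it and keeps streaming new output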
.. _cli_port:
@@ -620,6 +832,15 @@ Known Issues (kill)
-notrunc=false: Don't truncate output
-q=false: Only display numeric IDs
Running ``docker ps`` showing 2 linked containers.
.. code-block:: bash
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds webapp
d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db
.. _cli_pull:
``pull``
@@ -668,7 +889,7 @@ Known Issues (kill)
-link="": Remove the link instead of the actual container
Known Issues (rm)
~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~
* :issue:`197` indicates that ``docker kill`` may leave directories
behind and make it difficult to remove the container.
@@ -679,7 +900,7 @@ Examples:
.. code-block:: bash
$ docker rm /redis
$ sudo docker rm /redis
/redis
@@ -688,7 +909,7 @@ This will remove the container referenced under the link ``/redis``.
.. code-block:: bash
$ docker rm -link /webapp/redis
$ sudo docker rm -link /webapp/redis
/webapp/redis
@@ -697,7 +918,7 @@ network communication.
.. code-block:: bash
$ docker rm `docker ps -a -q`
$ sudo docker rm `docker ps -a -q`
This command will delete all stopped containers. The command ``docker ps -a -q`` will return all
@@ -714,6 +935,38 @@ containers will not be deleted.
Usage: docker rmi IMAGE [IMAGE...]
Remove one or more images
Removing tagged images
~~~~~~~~~~~~~~~~~~~~~~
Images can be removed either by their short or long IDs, or by their image names.
If an image has more than one name, each of them needs to be removed before the
image is removed.
.. code-block:: bash
$ sudo docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB)
test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB)
test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB)
$ sudo docker rmi fd484f19954f
Error: Conflict, fd484f19954f wasn't deleted
2013/12/11 05:47:16 Error: failed to remove one or more images
$ sudo docker rmi test1
Untagged: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
$ sudo docker rmi test2
Untagged: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
$ sudo docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB)
$ sudo docker rmi test
Untagged: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8
.. _cli_run:
@@ -751,13 +1004,27 @@ containers will not be deleted.
-link="": Add link to another container (name:alias)
-name="": Assign the specified name to the container. If no name is specific docker will generate a random name
-P=false: Publish all exposed ports to the host interfaces
``'docker run'`` first ``'creates'`` a writeable container layer over
the specified image, and then ``'starts'`` it using the specified
command. That is, ``'docker run'`` is equivalent to the API
``/containers/create`` then ``/containers/(id)/start``.
Examples
--------
``docker run`` can be used in combination with ``docker commit`` to :ref:`change the command that a container runs <cli_commit_examples>`.
Known Issues (run -volumes-from)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* :issue:`2702`: "lxc-start: Permission denied - failed to mount"
could indicate a permissions problem with AppArmor. Please see the
issue for a workaround.
Examples:
~~~~~~~~~
.. code-block:: bash
sudo docker run -cidfile /tmp/docker_test.cid ubuntu echo "test"
$ sudo docker run -cidfile /tmp/docker_test.cid ubuntu echo "test"
This will create a container and print "test" to the console. The
``cidfile`` flag makes docker attempt to create a new file and write the
@@ -766,7 +1033,10 @@ error. Docker will close this file when docker run exits.
.. code-block:: bash
docker run mount -t tmpfs none /var/spool/squid
$ sudo docker run -t -i -rm ubuntu bash
root@bc338942ef20:/# mount -t tmpfs none /mnt
mount: permission denied
This will *not* work, because by default, most potentially dangerous
kernel capabilities are dropped; including ``cap_sys_admin`` (which is
@@ -775,7 +1045,12 @@ allow it to run:
.. code-block:: bash
docker run -privileged mount -t tmpfs none /var/spool/squid
$ sudo docker run -privileged ubuntu bash
root@50e3f57e16e6:/# mount -t tmpfs none /mnt
root@50e3f57e16e6:/# df -h
Filesystem Size Used Avail Use% Mounted on
none 1.9G 0 1.9G 0% /mnt
The ``-privileged`` flag gives *all* capabilities to the container,
and it also lifts all the limitations enforced by the ``device``
@@ -785,7 +1060,7 @@ use-cases, like running Docker within Docker.
.. code-block:: bash
docker run -w /path/to/dir/ -i -t ubuntu pwd
$ sudo docker run -w /path/to/dir/ -i -t ubuntu pwd
The ``-w`` lets the command being executed run inside the given directory,
here /path/to/dir/. If the path does not exist it is created inside the
@@ -793,7 +1068,7 @@ container.
.. code-block:: bash
docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd
$ sudo docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd
The ``-v`` flag mounts the current working directory into the container.
The ``-w`` lets the command being executed run inside the current
@@ -803,7 +1078,7 @@ using the container, but inside the current working directory.
.. code-block:: bash
docker run -p 127.0.0.1:80:8080 ubuntu bash
$ sudo docker run -p 127.0.0.1:80:8080 ubuntu bash
This binds port ``8080`` of the container to port ``80`` on 127.0.0.1 of the
host machine. :ref:`port_redirection` explains in detail how to manipulate ports
@@ -811,7 +1086,7 @@ in Docker.
.. code-block:: bash
docker run -expose 80 ubuntu bash
$ sudo docker run -expose 80 ubuntu bash
This exposes port ``80`` of the container for use within a link without
publishing the port to the host system's interfaces. :ref:`port_redirection`
@@ -819,14 +1094,14 @@ explains in detail how to manipulate ports in Docker.
.. code-block:: bash
docker run -name console -t -i ubuntu bash
$ sudo docker run -name console -t -i ubuntu bash
This will create and run a new container with the container name
being ``console``.
.. code-block:: bash
docker run -link /redis:redis -name console ubuntu bash
$ sudo docker run -link /redis:redis -name console ubuntu bash
The ``-link`` flag will link the container named ``/redis`` into the
newly created container with the alias ``redis``. The new container
@@ -836,7 +1111,7 @@ to the newly created container.
.. code-block:: bash
docker run -volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd
$ sudo docker run -volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd
The ``-volumes-from`` flag mounts all the defined volumes from the
reference containers. Containers can be specified by a comma separated
@@ -845,12 +1120,17 @@ id may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes in
read-only or read-write mode, respectively. By default, the volumes are mounted
in the same mode (rw or ro) as the reference container.
Known Issues (run -volumes-from)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. _cli_save:
* :issue:`2702`: "lxc-start: Permission denied - failed to mount"
could indicate a permissions problem with AppArmor. Please see the
issue for a workaround.
``save``
---------
::
Usage: docker save image > repository.tar
Streams a tarred repository to the standard output stream.
Contains all parent layers, and all tags + versions.
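
For example:

.. code-block:: bash

    $ sudo docker save ubuntu > repository.tar

The resulting tarball can then be restored on another host with ``docker load``.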
.. _cli_search:
@@ -874,7 +1154,7 @@ Known Issues (run -volumes-from)
::
Usage: docker start [OPTIONS] NAME
Usage: docker start [OPTIONS] CONTAINER
Start a stopped container


@@ -42,11 +42,10 @@ This following command will build a development environment using the Dockerfile
.. code-block:: bash
sudo docker build -t docker .
sudo make build
If the build is successful, congratulations! You have produced a clean build of docker, neatly encapsulated in a standard build environment.
If the build is successful, congratulations! You have produced a clean build of
docker, neatly encapsulated in a standard build environment.
Step 4: Build the Docker Binary
@@ -56,10 +55,23 @@ To create the Docker binary, run this command:
.. code-block:: bash
sudo docker run -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh binary
sudo make binary
This will create the Docker binary in ``./bundles/<version>-dev/binary/``
Using your built Docker binary
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The binary is available outside the container in the directory
``./bundles/<version>-dev/binary/``. You can swap your host docker executable
with this binary for live testing - for example, on ubuntu:
.. code-block:: bash
sudo service docker stop ; sudo cp $(which docker) $(which docker)_ ; sudo cp ./bundles/<version>-dev/binary/docker-<version>-dev $(which docker);sudo service docker start
.. note:: It's safer to run the tests below before swapping your host's docker binary.
Step 5: Run the Tests
---------------------
@@ -68,10 +80,15 @@ To execute the test cases, run this command:
.. code-block:: bash
sudo docker run -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh test
sudo make test
Note: if you're running the tests in vagrant, you need to specify a dns entry in the command: `-dns 8.8.8.8`
Note: if you're running the tests in vagrant, you need to specify a dns entry in
the command (either edit the Makefile, or run the step manually):
.. code-block:: bash
sudo docker run -dns 8.8.8.8 -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh test
If the tests are successful then the tail of the output should look something like this
@@ -113,15 +130,24 @@ You can run an interactive session in the newly built container:
.. code-block:: bash
sudo docker run -privileged -i -t docker bash
sudo make shell
# type 'exit' to exit
# type 'exit' or Ctrl-D to exit
Extra Step: Build and view the Documentation
-------------------------------------------
.. note:: The binary is available outside the container in the directory ``./bundles/<version>-dev/binary/``. You can swap your host docker executable with this binary for live testing - for example, on ubuntu: ``sudo service docker stop ; sudo cp $(which docker) $(which docker)_ ; sudo cp ./bundles/<version>-dev/binary/docker-<version>-dev $(which docker);sudo service docker start``.
If you want to read the documentation from a local website, or are making changes
to it, you can build the documentation and then serve it by:
.. code-block:: bash
sudo make docs
# when it's done, you can point your browser to http://yourdockerhost:8000
# type Ctrl-C to exit
**Need More Help?**
If you need more help then hop on to the `#docker-dev IRC channel <irc://chat.freenode.net#docker-dev>`_ or post a message on the `Docker developer mailinglist <https://groups.google.com/d/forum/docker-dev>`_.
If you need more help then hop on to the `#docker-dev IRC channel <irc://chat.freenode.net#docker-dev>`_ or post a message on the `Docker developer mailing list <https://groups.google.com/d/forum/docker-dev>`_.


@@ -0,0 +1,137 @@
:title: Process Management with CFEngine
:description: Managing containerized processes with CFEngine
:keywords: cfengine, process, management, usage, docker, documentation
Process Management with CFEngine
================================
Create Docker containers with managed processes.
Docker monitors one process in each running container and the container lives or dies with that process.
By introducing CFEngine inside Docker containers, we can alleviate a few of the issues that may arise:
* It is possible to easily start multiple processes within a container, all of which will be managed automatically, with the normal ``docker run`` command.
* If a managed process dies or crashes, CFEngine will start it again within 1 minute.
* The container itself will live as long as the CFEngine scheduling daemon (cf-execd) lives. With CFEngine, we are able to decouple the life of the container from the uptime of the service it provides.
How it works
------------
CFEngine, together with the cfe-docker integration policies, are installed as part of the Dockerfile. This builds CFEngine into our Docker image.
The Dockerfile's ``ENTRYPOINT`` takes an arbitrary number of commands (with any desired arguments) as parameters.
When we run the Docker container these parameters get written to CFEngine policies and CFEngine takes over to ensure that the desired processes are running in the container.
CFEngine scans the process table for the ``basename`` of the commands given to the ``ENTRYPOINT`` and runs the command to start the process if the ``basename`` is not found.
For example, if we start the container with ``docker run "/path/to/my/application parameters"``, CFEngine will look for a process named ``application`` and run the command.
If an entry for ``application`` is not found in the process table at any point in time, CFEngine will execute ``/path/to/my/application parameters`` to start the application once again.
The check on the process table happens every minute.
Note that it is therefore important that the command to start your application leaves a process with the basename of the command.
This can be made more flexible by making some minor adjustments to the CFEngine policies, if desired.
Usage
-----
This example assumes you have Docker installed and working.
We will install and manage ``apache2`` and ``sshd`` in a single container.
There are three steps:
1. Install CFEngine into the container.
2. Copy the CFEngine Docker process management policy into the containerized CFEngine installation.
3. Start your application processes as part of the ``docker run`` command.
Building the container image
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The first two steps can be done as part of a Dockerfile, as follows.
.. code-block:: bash
FROM ubuntu
MAINTAINER Eystein Måløy Stenberg <eytein.stenberg@gmail.com>
RUN apt-get -y install wget lsb-release unzip
# install latest CFEngine
RUN wget -qO- http://cfengine.com/pub/gpg.key | apt-key add -
RUN echo "deb http://cfengine.com/pub/apt $(lsb_release -cs) main" > /etc/apt/sources.list.d/cfengine-community.list
RUN apt-get update
RUN apt-get install cfengine-community
# install cfe-docker process management policy
RUN wget --no-check-certificate https://github.com/estenberg/cfe-docker/archive/master.zip -P /tmp/ && unzip /tmp/master.zip -d /tmp/
RUN cp /tmp/cfe-docker-master/cfengine/bin/* /var/cfengine/bin/
RUN cp /tmp/cfe-docker-master/cfengine/inputs/* /var/cfengine/inputs/
RUN rm -rf /tmp/cfe-docker-master /tmp/master.zip
# apache2 and openssh are just for testing purposes, install your own apps here
RUN apt-get -y install openssh-server apache2
RUN mkdir -p /var/run/sshd
RUN echo "root:password" | chpasswd # need a password for ssh
ENTRYPOINT ["/var/cfengine/bin/docker_processes_run.sh"]
By saving this file as ``Dockerfile`` to a working directory, you can then build your container with the docker build command,
e.g. ``docker build -t managed_image .``
Testing the container
~~~~~~~~~~~~~~~~~~~~~
Start the container with ``apache2`` and ``sshd`` running and managed, forwarding a port to our SSH instance:
.. code-block:: bash
docker run -p 127.0.0.1:222:22 -d managed_image "/usr/sbin/sshd" "/etc/init.d/apache2 start"
We now clearly see one of the benefits of the cfe-docker integration: it allows us to start several processes
as part of a normal ``docker run`` command.
We can now log in to our new container and see that both ``apache2`` and ``sshd`` are running. We have set the root password to
"password" in the Dockerfile above and can use that to log in with ssh:
.. code-block:: bash
ssh -p222 root@127.0.0.1
ps -ef
UID PID PPID C STIME TTY TIME CMD
root 1 0 0 07:48 ? 00:00:00 /bin/bash /var/cfengine/bin/docker_processes_run.sh /usr/sbin/sshd /etc/init.d/apache2 start
root 18 1 0 07:48 ? 00:00:00 /var/cfengine/bin/cf-execd -F
root 20 1 0 07:48 ? 00:00:00 /usr/sbin/sshd
root 32 1 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start
www-data 34 32 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start
www-data 35 32 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start
www-data 36 32 0 07:48 ? 00:00:00 /usr/sbin/apache2 -k start
root 93 20 0 07:48 ? 00:00:00 sshd: root@pts/0
root 105 93 0 07:48 pts/0 00:00:00 -bash
root 112 105 0 07:49 pts/0 00:00:00 ps -ef
If we stop apache2, it will be started again within a minute by CFEngine.
.. code-block:: bash
service apache2 status
Apache2 is running (pid 32).
service apache2 stop
* Stopping web server apache2 ... waiting [ OK ]
service apache2 status
Apache2 is NOT running.
# ... wait up to 1 minute...
service apache2 status
Apache2 is running (pid 173).
Adapting to your applications
-----------------------------
To make sure your applications get managed in the same manner, there are just two things you need to adjust from the above example:
* In the Dockerfile used above, install your applications instead of ``apache2`` and ``sshd``.
* When you start the container with ``docker run``, specify the command line arguments to your applications rather than ``apache2`` and ``sshd``.


@@ -131,8 +131,6 @@ Attach to the container to see the results in real-time.
- **"docker attach**" This will allow us to attach to a background
process to see what is going on.
- **"-sig-proxy=true"** Proxify all received signal to the process
(even in non-tty mode)
- **$CONTAINER_ID** The Id of the container we want to attach too.
Exit from the container attachment by pressing Control-C.


@@ -24,3 +24,5 @@ to more substantial services like those which you might find in production.
postgresql_service
mongodb
running_riak_service
using_supervisord
cfengine_process_management


@@ -7,26 +7,18 @@
PostgreSQL Service
==================
.. include:: example_header.inc
.. note::
A shorter version of `this blog post`_.
.. note::
As of version 0.5.2, Docker requires root privileges to run.
You have to either manually adjust your system configuration (permissions on
/var/run/docker.sock or sudo config), or prefix `docker` with `sudo`. Check
`this thread`_ for details.
.. _this blog post: http://zaiste.net/2013/08/docker_postgresql_how_to/
.. _this thread: https://groups.google.com/forum/?fromgroups#!topic/docker-club/P3xDLqmLp0E
Installing PostgreSQL on Docker
-------------------------------
For clarity I won't be showing command output.
Run an interactive shell in Docker container.
Run an interactive shell in a Docker container.
.. code-block:: bash
@@ -38,26 +30,26 @@ Update its dependencies.
apt-get update
Install ``python-software-properties``.
Install ``python-software-properties``, ``software-properties-common``, ``wget`` and ``vim``.
.. code-block:: bash
apt-get -y install python-software-properties
apt-get -y install software-properties-common
apt-get -y install python-software-properties software-properties-common wget vim
Add Pitti's PostgreSQL repository. It contains the most recent stable release
of PostgreSQL i.e. ``9.2``.
Add PostgreSQL's repository. It contains the most recent stable release
of PostgreSQL, ``9.3``.
.. code-block:: bash
add-apt-repository ppa:pitti/postgresql
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list
apt-get update
Finally, install PostgreSQL 9.2
Finally, install PostgreSQL 9.3
.. code-block:: bash
apt-get -y install postgresql-9.2 postgresql-client-9.2 postgresql-contrib-9.2
apt-get -y install postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3
Now, create a PostgreSQL superuser role that can create databases and
other roles. Following Vagrant's convention the role will be named
@@ -76,15 +68,14 @@ role.
Adjust PostgreSQL configuration so that remote connections to the
database are possible. Make sure that inside
``/etc/postgresql/9.2/main/pg_hba.conf`` you have following line (you will need
to install an editor, e.g. ``apt-get install vim``):
``/etc/postgresql/9.3/main/pg_hba.conf`` you have the following line:
.. code-block:: bash
host all all 0.0.0.0/0 md5
Additionaly, inside ``/etc/postgresql/9.2/main/postgresql.conf``
uncomment ``listen_addresses`` so it is as follows:
Additionally, inside ``/etc/postgresql/9.3/main/postgresql.conf``
uncomment ``listen_addresses`` like so:
.. code-block:: bash
@@ -94,7 +85,7 @@ uncomment ``listen_addresses`` so it is as follows:
This PostgreSQL setup is for development purposes only. Refer
to the PostgreSQL documentation on how to fine-tune these settings so that it
is enough secure.
is secure enough.
Exit.
@@ -102,43 +93,43 @@ Exit.
exit
Create an image and assign it a name. ``<container_id>`` is in the
Bash prompt; you can also locate it using ``docker ps -a``.
Create an image from our container and assign it a name. The ``<container_id>``
is in the Bash prompt; you can also locate it using ``docker ps -a``.
.. code-block:: bash
sudo docker commit <container_id> <your username>/postgresql
Finally, run PostgreSQL server via ``docker``.
Finally, run the PostgreSQL server via ``docker``.
.. code-block:: bash
CONTAINER=$(sudo docker run -d -p 5432 \
-t <your username>/postgresql \
/bin/su postgres -c '/usr/lib/postgresql/9.2/bin/postgres \
-D /var/lib/postgresql/9.2/main \
-c config_file=/etc/postgresql/9.2/main/postgresql.conf')
/bin/su postgres -c '/usr/lib/postgresql/9.3/bin/postgres \
-D /var/lib/postgresql/9.3/main \
-c config_file=/etc/postgresql/9.3/main/postgresql.conf')
Connect the PostgreSQL server using ``psql`` (You will need postgres installed
on the machine. For ubuntu, use something like
``sudo apt-get install postgresql``).
Connect to the PostgreSQL server using ``psql`` (you will need the
postgresql client installed on the machine; for Ubuntu, use something
like ``sudo apt-get install postgresql-client``).
.. code-block:: bash
CONTAINER_IP=$(sudo docker inspect $CONTAINER | grep IPAddress | awk '{ print $2 }' | tr -d ',"')
CONTAINER_IP=$(sudo docker inspect -format='{{.NetworkSettings.IPAddress}}' $CONTAINER)
psql -h $CONTAINER_IP -p 5432 -d docker -U docker -W
As before, create roles or databases if needed.
.. code-block:: bash
psql (9.2.4)
psql (9.3.1)
Type "help" for help.
docker=# CREATE DATABASE foo OWNER=docker;
CREATE DATABASE
Additionally, publish your newly created image on Docker Index.
Additionally, publish your newly created image on the Docker Index.
.. code-block:: bash
@@ -160,9 +151,9 @@ container starts.
.. code-block:: bash
sudo docker commit -run='{"Cmd": \
["/bin/su", "postgres", "-c", "/usr/lib/postgresql/9.2/bin/postgres -D \
/var/lib/postgresql/9.2/main -c \
config_file=/etc/postgresql/9.2/main/postgresql.conf"], "PortSpecs": ["5432"]}' \
["/bin/su", "postgres", "-c", "/usr/lib/postgresql/9.3/bin/postgres -D \
/var/lib/postgresql/9.3/main -c \
config_file=/etc/postgresql/9.3/main/postgresql.conf"], "PortSpecs": ["5432"]}' \
<container_id> <your username>/postgresql
From now on, just type ``docker run <your username>/postgresql`` and

View File

@@ -0,0 +1,128 @@
:title: Using Supervisor with Docker
:description: How to use Supervisor process management with Docker
:keywords: docker, supervisor, process management
.. _using_supervisord:
Using Supervisor with Docker
============================
.. include:: example_header.inc
Traditionally a Docker container runs a single process when it is launched, for
example an Apache daemon or a SSH server daemon. Often though you want to run
more than one process in a container. There are a number of ways you can
achieve this ranging from using a simple Bash script as the value of your
container's ``CMD`` instruction to installing a process management tool.
In this example we're going to make use of the process management tool,
`Supervisor <http://supervisord.org/>`_, to manage multiple processes in our
container. Using Supervisor allows us to better control, manage, and restart the
processes we want to run. To demonstrate this we're going to install and manage both an
SSH daemon and an Apache daemon.
Creating a Dockerfile
---------------------
Let's start by creating a basic ``Dockerfile`` for our new image.
.. code-block:: bash
FROM ubuntu:latest
MAINTAINER examples@docker.io
RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
RUN apt-get update
RUN apt-get upgrade -y
Installing Supervisor
---------------------
We can now install our SSH and Apache daemons as well as Supervisor in our container.
.. code-block:: bash
RUN apt-get install -y openssh-server apache2 supervisor
RUN mkdir -p /var/run/sshd
RUN mkdir -p /var/log/supervisor
Here we're installing the ``openssh-server``, ``apache2`` and ``supervisor``
(which provides the Supervisor daemon) packages. We're also creating two new
directories that are needed to run our SSH daemon and Supervisor.
Adding Supervisor's configuration file
--------------------------------------
Now let's add a configuration file for Supervisor. The default file is called
``supervisord.conf`` and is located in ``/etc/supervisor/conf.d/``.
.. code-block:: bash
ADD supervisord.conf /etc/supervisor/conf.d/supervisord.conf
Let's see what is inside our ``supervisord.conf`` file.
.. code-block:: bash
[supervisord]
nodaemon=true
[program:sshd]
command=/usr/sbin/sshd -D
[program:apache2]
command=/bin/bash -c "source /etc/apache2/envvars && /usr/sbin/apache2 -DFOREGROUND"
The ``supervisord.conf`` configuration file contains directives that configure
Supervisor and the processes it manages. The first block ``[supervisord]``
provides configuration for Supervisor itself. We're using one directive,
``nodaemon`` which tells Supervisor to run interactively rather than daemonize.
The next two blocks manage the services we wish to control. Each block controls
a separate process. The blocks contain a single directive, ``command``, which
specifies what command to run to start each process.
Exposing ports and running Supervisor
-------------------------------------
Now let's finish our ``Dockerfile`` by exposing some required ports and
specifying the ``CMD`` instruction to start Supervisor when our container
launches.
.. code-block:: bash
EXPOSE 22 80
CMD ["/usr/bin/supervisord"]
Here we've exposed ports 22 and 80 on the container and we're running the
``/usr/bin/supervisord`` binary when the container launches.
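For reference, here is the complete ``Dockerfile`` assembled from the snippets above.
.. code-block:: bash
FROM ubuntu:latest
MAINTAINER examples@docker.io
RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
RUN apt-get update
RUN apt-get upgrade -y
RUN apt-get install -y openssh-server apache2 supervisor
RUN mkdir -p /var/run/sshd
RUN mkdir -p /var/log/supervisor
ADD supervisord.conf /etc/supervisor/conf.d/supervisord.conf
EXPOSE 22 80
CMD ["/usr/bin/supervisord"]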
Building our container
----------------------
We can now build our new container.
.. code-block:: bash
sudo docker build -t <yourname>/supervisord .
Running our Supervisor container
--------------------------------
Once we've got a built image we can launch a container from it.
.. code-block:: bash
sudo docker run -p 22 -p 80 -t -i <yourname>/supervisord
2013-11-25 18:53:22,312 CRIT Supervisor running as root (no user in config file)
2013-11-25 18:53:22,312 WARN Included extra file "/etc/supervisor/conf.d/supervisord.conf" during parsing
2013-11-25 18:53:22,342 INFO supervisord started with pid 1
2013-11-25 18:53:23,346 INFO spawned: 'sshd' with pid 6
2013-11-25 18:53:23,349 INFO spawned: 'apache2' with pid 7
. . .
We've launched a new container interactively using the ``docker run`` command.
That container has run Supervisor and launched the SSH and Apache daemons with
it. We've specified the ``-p`` flag to expose ports 22 and 80. From here we can
now identify the exposed ports and connect to one or both of the SSH and Apache
daemons.
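A minimal sketch of identifying those ports (the container ID below is a placeholder; take it from ``docker ps``):
.. code-block:: bash
# list running containers and their port mappings
sudo docker ps
# or query the host port mapped to container port 22 directly
sudo docker port <container_id> 22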

View File

@@ -22,22 +22,37 @@ Amazon QuickStart
1. **Choose an image:**
* Open http://cloud-images.ubuntu.com/locator/ec2/
* Enter ``amd64 precise`` in the search field (it will search as you
type)
* Pick an image by clicking on the image name. *An EBS-enabled
image will let you use a t1.micro instance.* Clicking on the image
name will take you to your AWS Console.
* Launch the `Create Instance Wizard
<https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:>`_ menu
on your AWS Console.
* When picking the source AMI for your instance type, select "Community
AMIs".
* Search for ``amd64 precise``. Pick one of the amd64 Ubuntu images.
* If you choose an EBS-enabled AMI, you'll also be able to launch a
``t1.micro`` instance (more info on `pricing
<http://aws.amazon.com/en/ec2/pricing/>`_). ``t1.micro`` instances are
eligible for Amazon's Free Usage Tier.
* When you click select you'll be taken to the instance setup, and you're one
click away from having your Ubuntu VM up and running.
2. **Tell CloudInit to install Docker:**
* Enter ``#include https://get.docker.io`` into the instance *User
Data*. `CloudInit <https://help.ubuntu.com/community/CloudInit>`_
is part of the Ubuntu image you chose and it bootstraps from this
*User Data*.
* When you're on the "Configure Instance Details" step, expand the "Advanced
Details" section.
3. After a few more standard choices where defaults are probably ok, your
AWS Ubuntu instance with Docker should be running!
* Under "User data", select "As text".
* Enter ``#include https://get.docker.io`` into the instance *User Data*.
`CloudInit <https://help.ubuntu.com/community/CloudInit>`_ is part of the
Ubuntu image you chose; it will bootstrap Docker by running the shell
script located at this URL.
3. After a few more standard choices where defaults are probably ok, your AWS
Ubuntu instance with Docker should be running!
**If this is your first AWS instance, you may need to set up your
Security Group to allow SSH.** By default all incoming ports to your
@@ -154,7 +169,7 @@ Docker that way too. Vagrant 1.1 or higher is required.
includes rights to SSH (port 22) to your container.
If you have an advanced AWS setup, you might want to have a look at
https://github.com/mitchellh/vagrant-aws
`vagrant-aws <https://github.com/mitchellh/vagrant-aws>`_.
7. Connect to your machine

View File

@@ -1,5 +1,5 @@
:title: Installation on Arch Linux
:description: Docker installation on Arch Linux.
:description: Docker installation on Arch Linux.
:keywords: arch linux, virtualization, docker, documentation, installation
.. _arch_linux:
@@ -7,54 +7,58 @@
Arch Linux
==========
Installing on Arch Linux is not officially supported but can be handled via
either of the following AUR packages:
.. include:: install_header.inc
* `lxc-docker <https://aur.archlinux.org/packages/lxc-docker/>`_
* `lxc-docker-git <https://aur.archlinux.org/packages/lxc-docker-git/>`_
.. include:: install_unofficial.inc
The lxc-docker package will install the latest tagged version of docker.
The lxc-docker-git package will build from the current master branch.
Installing on Arch Linux can be handled via the package in community:
* `docker <https://www.archlinux.org/packages/community/x86_64/docker/>`_
or the following AUR package:
* `docker-git <https://aur.archlinux.org/packages/docker-git/>`_
The docker package will install the latest tagged version of docker.
The docker-git package will build from the current master branch.
Dependencies
------------
Docker depends on several packages which are specified as dependencies in
either AUR package.
the packages. The core dependencies are:
* aufs3
* bridge-utils
* go
* device-mapper
* iproute2
* linux-aufs_friendly
* lxc
* sqlite
Installation
------------
.. include:: install_header.inc
For the normal package a simple
::
.. include:: install_unofficial.inc
pacman -S docker
is all that is needed.
For the AUR package execute:
::
yaourt -S docker-git
The instructions here assume **yaourt** is installed. See
`Arch User Repository <https://wiki.archlinux.org/index.php/Arch_User_Repository#Installing_packages>`_
for information on building and installing packages from the AUR if you have not
done so before.
Keep in mind that if **linux-aufs_friendly** is not already installed that a
new kernel will be compiled and this can take quite a while.
::
yaourt -S lxc-docker-git
Starting Docker
---------------
Prior to starting docker modify your bootloader to use the
**linux-aufs_friendly** kernel and reboot your system.
There is a systemd service unit created for docker. To start the docker service:
::
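# assuming the unit is named docker.service
sudo systemctl start docker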

View File

@@ -12,17 +12,9 @@ Binaries
**This instruction set is meant for hackers who want to try out Docker
on a variety of environments.**
Right now, the officially supported distributions are:
- :ref:`ubuntu_precise`
- :ref:`ubuntu_raring`
But we know people have had success running it under
- Debian
- Suse
- :ref:`arch_linux`
Before following these directions, you should really check if a packaged version
of Docker is already available for your distribution. We have packages for many
distributions, and more keep showing up all the time!
Check Your Kernel
-----------------
@@ -34,7 +26,7 @@ Get the docker binary:
.. code-block:: bash
wget --output-document=docker https://get.docker.io/builds/Linux/x86_64/docker-latest
wget https://get.docker.io/builds/Linux/x86_64/docker-latest -O docker
chmod +x docker
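As a quick sketch of trying the binary out (assuming the daemon flags current for this release), start the daemon and run a container with the same binary:
.. code-block:: bash
# start the docker daemon in the background (needs root)
sudo ./docker -d &
# then use the same binary as a client to run a test container
sudo ./docker run -i -t ubuntu /bin/bash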

View File

@@ -0,0 +1,52 @@
:title: Requirements and Installation on Fedora
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: Docker, Docker documentation, fedora, requirements, virtualbox, vagrant, git, ssh, putty, cygwin, linux
.. _fedora:
Fedora
======
.. include:: install_header.inc
.. include:: install_unofficial.inc
Docker is available in **Fedora 19 and later**. Please note that, due to
current Docker limitations, Docker is able to run only on the **64 bit**
architecture.
Installation
------------
Install the ``docker-io`` package which will install Docker on our host.
.. code-block:: bash
sudo yum -y install docker-io
To update the ``docker-io`` package
.. code-block:: bash
sudo yum -y update docker-io
Now that it's installed, let's start the Docker daemon.
.. code-block:: bash
sudo systemctl start docker
If we want Docker to start at boot, we should also:
.. code-block:: bash
sudo systemctl enable docker
Now let's verify that Docker is working.
.. code-block:: bash
sudo docker run -i -t mattdm/fedora /bin/bash
**Done!**, now continue with the :ref:`hello_world` example.

View File

@@ -4,8 +4,8 @@
.. _gentoo_linux:
Gentoo Linux
============
Gentoo
======
.. include:: install_header.inc
@@ -22,17 +22,19 @@ provided at https://github.com/tianon/docker-overlay which can be added using
properly installing and using the overlay can be found in `the overlay README
<https://github.com/tianon/docker-overlay/blob/master/README.md#using-this-overlay>`_.
Note that sometimes there is a disparity between the latest version and what's
in the overlay, and between the latest version in the overlay and what's in the
portage tree. Please be patient, and the latest version should propagate
shortly.
Installation
^^^^^^^^^^^^
The package should properly pull in all the necessary dependencies and prompt
for all necessary kernel options. For the most straightforward installation
experience, use ``sys-kernel/aufs-sources`` as your kernel sources. If you
prefer not to use ``sys-kernel/aufs-sources``, the portage tree also contains
``sys-fs/aufs3``, which includes the patches necessary for adding AUFS support
to other kernel source packages such as ``sys-kernel/gentoo-sources`` (and a
``kernel-patch`` USE flag to perform the patching to ``/usr/src/linux``
automatically).
for all necessary kernel options. The ebuilds for 0.7+ include use flags to
pull in the proper dependencies of the major storage drivers, with the
"device-mapper" use flag being enabled by default, since that is the simplest
installation path.
.. code-block:: bash
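# package name assumed to be app-emulation/docker
sudo emerge -av app-emulation/docker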
@@ -47,9 +49,9 @@ the #docker IRC channel on the freenode network.
Starting Docker
^^^^^^^^^^^^^^^
Ensure that you are running a kernel that includes the necessary AUFS
patches/support and includes all the necessary modules and/or configuration for
LXC.
Ensure that you are running a kernel that includes all the necessary modules
and/or configuration for LXC (and optionally for device-mapper and/or AUFS,
depending on the storage driver you've decided to use).
OpenRC
------

View File

@@ -0,0 +1,65 @@
:title: Installation on Google Cloud Platform
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: Docker, Docker documentation, installation, google, Google Compute Engine, Google Cloud Platform
`Google Cloud Platform <https://cloud.google.com/>`_
====================================================
.. include:: install_header.inc
.. _googlequickstart:
`Compute Engine <https://developers.google.com/compute>`_ QuickStart for `Debian <https://www.debian.org>`_
-----------------------------------------------------------------------------------------------------------
1. Go to `Google Cloud Console <https://cloud.google.com/console>`_ and create a new Cloud Project with `Compute Engine enabled <https://developers.google.com/compute/docs/signup>`_.
2. Download and configure the `Google Cloud SDK <https://developers.google.com/cloud/sdk/>`_ to use your project with the following commands:
.. code-block:: bash
$ curl https://dl.google.com/dl/cloudsdk/release/install_google_cloud_sdk.bash | bash
$ gcloud auth login
Enter a cloud project id (or leave blank to not set): <google-cloud-project-id>
3. Start a new instance, select a zone close to you and the desired instance size:
.. code-block:: bash
$ gcutil addinstance docker-playground --image=backports-debian-7
1: europe-west1-a
...
4: us-central1-b
>>> <zone-index>
1: machineTypes/n1-standard-1
...
12: machineTypes/g1-small
>>> <machine-type-index>
4. Connect to the instance using SSH:
.. code-block:: bash
$ gcutil ssh docker-playground
docker-playground:~$
5. Enable IP forwarding:
.. code-block:: bash
docker-playground:~$ echo net.ipv4.ip_forward=1 | sudo tee /etc/sysctl.d/99-docker.conf
docker-playground:~$ sudo sysctl --system
6. Install the latest Docker release and configure it to start when the instance boots:
.. code-block:: bash
docker-playground:~$ curl get.docker.io | bash
docker-playground:~$ sudo update-rc.d docker defaults
7. Start a new container:
.. code-block:: bash
docker-playground:~$ sudo docker run busybox echo 'docker on GCE \o/'
docker on GCE \o/

View File

@@ -9,7 +9,7 @@ Installation
There are a number of ways to install Docker, depending on where you
want to run the daemon. The :ref:`ubuntu_linux` installation is the
officially-tested version, and the community adds more techniques for
officially-tested version. The community adds more techniques for
installing Docker all the time.
Contents:
@@ -18,13 +18,16 @@ Contents:
:maxdepth: 1
ubuntulinux
binaries
security
upgrading
kernel
rhel
fedora
archlinux
gentoolinux
vagrant
windows
amazon
rackspace
archlinux
gentoolinux
google
kernel
binaries
security
upgrading

View File

@@ -11,10 +11,10 @@ In short, Docker has the following kernel requirements:
- Linux version 3.8 or above.
- `AUFS support <http://aufs.sourceforge.net/>`_.
- Cgroups and namespaces must be enabled.
*Note: as of 0.7 docker no longer requires aufs. AUFS support is still available as an optional driver.*
The officially supported kernel is the one recommended by the
:ref:`ubuntu_linux` installation path. It is the one that most developers
will use, and the one that receives the most attention from the core
@@ -58,17 +58,6 @@ detects something older than 3.8.
See issue `#407 <https://github.com/dotcloud/docker/issues/407>`_ for details.
AUFS support
------------
Docker currently relies on AUFS, an unioning filesystem.
While AUFS is included in the kernels built by the Debian and Ubuntu
distributions, is not part of the standard kernel. This means that if
you decide to roll your own kernel, you will have to patch your
kernel tree to add AUFS. The process is documented on
`AUFS webpage <http://aufs.sourceforge.net/>`_.
Cgroups and namespaces
----------------------
@@ -122,3 +111,40 @@ And replace it by the following one::
GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
Then run ``update-grub``, and reboot.
Details
-------
Networking:
- CONFIG_BRIDGE
- CONFIG_NETFILTER_XT_MATCH_ADDRTYPE
- CONFIG_NF_NAT
- CONFIG_NF_NAT_IPV4
- CONFIG_NF_NAT_NEEDED
LVM:
- CONFIG_BLK_DEV_DM
- CONFIG_DM_THIN_PROVISIONING
- CONFIG_EXT4_FS
Namespaces:
- CONFIG_NAMESPACES
- CONFIG_UTS_NS
- CONFIG_IPC_NS
- CONFIG_UID_NS
- CONFIG_PID_NS
- CONFIG_NET_NS
Cgroups:
- CONFIG_CGROUPS
Cgroup controllers (optional but highly recommended):
- CONFIG_CGROUP_CPUACCT
- CONFIG_BLK_CGROUP
- CONFIG_MEMCG
- CONFIG_MEMCG_SWAP
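To check these options against your own kernel, a minimal sketch (config file locations vary by distribution):
.. code-block:: bash
# most distributions ship the build config in /boot
grep CONFIG_BRIDGE= /boot/config-$(uname -r)
# kernels built with CONFIG_IKCONFIG_PROC expose it at runtime
zgrep CONFIG_MEMCG /proc/config.gz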

View File

@@ -2,7 +2,6 @@
:description: Installing Docker on Ubuntu provided by Rackspace
:keywords: Rackspace Cloud, installation, docker, linux, ubuntu
===============
Rackspace Cloud
===============
@@ -14,14 +13,14 @@ straightforward, and you should mostly be able to follow the
**However, there is one caveat:**
If you are using any linux not already shipping with the 3.8 kernel
If you are using any Linux not already shipping with the 3.8 kernel
you will need to install it. And this is a little more difficult on
Rackspace.
Rackspace boots their servers using grub's ``menu.lst`` and does not
like non 'virtual' packages (e.g. xen compatible) kernels there,
although they do work. This makes ``update-grub`` to not have the
expected result, and you need to set the kernel manually.
like non 'virtual' packages (e.g. Xen compatible) kernels there,
although they do work. This results in ``update-grub`` not having the
expected result, and you will need to set the kernel manually.
**Do not attempt this on a production machine!**
@@ -34,7 +33,7 @@ expected result, and you need to set the kernel manually.
apt-get install linux-generic-lts-raring
Great, now you have kernel installed in ``/boot/``, next is to make it
Great, now you have the kernel installed in ``/boot/``, next you need to make it
boot next time.
.. code-block:: bash
@@ -48,9 +47,9 @@ boot next time.
Now you need to manually edit ``/boot/grub/menu.lst``; you will find a
section at the bottom with the existing options. Copy the top one and
substitute the new kernel into that. Make sure the new kernel is on
top, and double check kernel and initrd point to the right files.
top, and double check the kernel and initrd lines point to the right files.
Make special care to double check the kernel and initrd entries.
Take special care to double check the kernel and initrd entries.
.. code-block:: bash
@@ -79,7 +78,7 @@ It will probably look something like this:
initrd /boot/initrd.img-3.2.0-38-virtual
Reboot server (either via command line or console)
Reboot the server (either via command line or console)
.. code-block:: bash

View File

@@ -0,0 +1,71 @@
:title: Requirements and Installation on Red Hat Enterprise Linux
:description: Please note this project is currently under heavy development. It should not be used in production.
:keywords: Docker, Docker documentation, requirements, linux, rhel, centos
.. _rhel:
Red Hat Enterprise Linux
========================
.. include:: install_header.inc
.. include:: install_unofficial.inc
Docker is available for **RHEL** on EPEL. These instructions should work for
both RHEL and CentOS. They will likely work for other binary compatible EL6
distributions as well, but they haven't been tested.
Please note that this package is part of `Extra Packages for Enterprise
Linux (EPEL)`_, a community effort to create and maintain additional packages
for the RHEL distribution.
Also note that due to the current Docker limitations, Docker is able to run
only on the **64 bit** architecture.
Installation
------------
Firstly, you need to install the EPEL repository. Please follow the `EPEL installation instructions`_.
Next, let's install the ``docker-io`` package which will install Docker on our host.
.. code-block:: bash
sudo yum -y install docker-io
To update the ``docker-io`` package
.. code-block:: bash
sudo yum -y update docker-io
Now that it's installed, let's start the Docker daemon.
.. code-block:: bash
sudo service docker start
If we want Docker to start at boot, we should also:
.. code-block:: bash
sudo chkconfig docker on
Now let's verify that Docker is working.
.. code-block:: bash
sudo docker run -i -t mattdm/fedora /bin/bash
**Done!**, now continue with the :ref:`hello_world` example.
Issues?
-------
If you have any issues - please report them directly in the `Red Hat Bugzilla for docker-io component`_.
.. _Extra Packages for Enterprise Linux (EPEL): https://fedoraproject.org/wiki/EPEL
.. _EPEL installation instructions: https://fedoraproject.org/wiki/EPEL#How_can_I_use_these_extra_packages.3F
.. _Red Hat Bugzilla for docker-io component : https://bugzilla.redhat.com/enter_bug.cgi?product=Fedora%20EPEL&component=docker-io

View File

@@ -4,8 +4,8 @@
.. _ubuntu_linux:
Ubuntu Linux
============
Ubuntu
======
.. warning::
@@ -14,16 +14,11 @@ Ubuntu Linux
.. include:: install_header.inc
Right now, the officially supported distribution are:
Docker is supported on the following versions of Ubuntu:
- :ref:`ubuntu_precise`
- :ref:`ubuntu_raring`
Docker has the following dependencies
* Linux kernel 3.8 (read more about :ref:`kernel`)
* AUFS file system support (we are working on BTRFS support as an alternative)
Please read :ref:`ufw` if you plan to use `UFW (Uncomplicated
Firewall) <https://help.ubuntu.com/community/UFW>`_
@@ -68,34 +63,48 @@ Installation
These instructions have changed for 0.6. If you are upgrading from
an earlier version, you will need to follow them again.
Docker is available as a Debian package, which makes installation easy.
Docker is available as a Debian package, which makes installation
easy. **See the :ref:`installmirrors` section below if you are not in
the United States.** Other sources of the Debian packages may be
faster for you to install.
First add the Docker repository key to your local keychain. You can use the
``apt-key`` command to check the fingerprint matches: ``36A1 D786 9245 C895 0F96
6E92 D857 6A8B A88D 21E9``
.. code-block:: bash
# Add the Docker repository key to your local keychain
# using apt-key finger you can check the fingerprint matches 36A1 D786 9245 C895 0F96 6E92 D857 6A8B A88D 21E9
sudo sh -c "wget -qO- https://get.docker.io/gpg | apt-key add -"
# Add the Docker repository to your apt sources list.
sudo sh -c "echo deb http://get.docker.io/ubuntu docker main\
> /etc/apt/sources.list.d/docker.list"
Add the Docker repository to your apt sources list, update and install the
``lxc-docker`` package.
# Update your sources
sudo apt-get update
# Install, you will see another warning that the package cannot be authenticated. Confirm install.
sudo apt-get install lxc-docker
Verify it worked
*You may receive a warning that the package isn't trusted. Answer yes to
continue installation.*
.. code-block:: bash
sudo sh -c "echo deb http://get.docker.io/ubuntu docker main\
> /etc/apt/sources.list.d/docker.list"
sudo apt-get update
sudo apt-get install lxc-docker
.. note::
There is also a simple ``curl`` script available to help with this process.
.. code-block:: bash
curl -s https://get.docker.io/ubuntu/ | sudo sh
Now verify that the installation has worked by downloading the ``ubuntu`` image
and launching a container.
.. code-block:: bash
# download the base 'ubuntu' container and run bash inside it while setting up an interactive shell
sudo docker run -i -t ubuntu /bin/bash
# type 'exit' to exit
Type ``exit`` to exit
**Done!**, now continue with the :ref:`hello_world` example.
@@ -107,10 +116,13 @@ Ubuntu Raring 13.04 (64 bit)
Dependencies
------------
**AUFS filesystem support**
**Optional AUFS filesystem support**
Ubuntu Raring already comes with the 3.8 kernel, so we don't need to install it. However, not all systems
have AUFS filesystem support enabled, so we need to install it.
have AUFS filesystem support enabled. AUFS support is optional as of version 0.7, but it's still available as
a driver and we recommend using it if you can.
To make sure AUFS is installed, run the following commands:
.. code-block:: bash
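# assumed package providing the AUFS modules on Raring
sudo apt-get update
sudo apt-get install linux-image-extra-`uname -r`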
@@ -123,36 +135,37 @@ Installation
Docker is available as a Debian package, which makes installation easy.
*Please note that these instructions have changed for 0.6. If you are upgrading from an earlier version, you will need
to follow them again.*
.. warning::
Please note that these instructions have changed for 0.6. If you are upgrading from an earlier version, you will need
to follow them again.
First add the Docker repository key to your local keychain. You can use the
``apt-key`` command to check the fingerprint matches: ``36A1 D786 9245 C895 0F96
6E92 D857 6A8B A88D 21E9``
.. code-block:: bash
# Add the Docker repository key to your local keychain
# using apt-key finger you can check the fingerprint matches 36A1 D786 9245 C895 0F96 6E92 D857 6A8B A88D 21E9
sudo sh -c "wget -qO- https://get.docker.io/gpg | apt-key add -"
# Add the Docker repository to your apt sources list.
sudo sh -c "echo deb http://get.docker.io/ubuntu docker main\
> /etc/apt/sources.list.d/docker.list"
# update
sudo apt-get update
# install
sudo apt-get install lxc-docker
Verify it worked
Add the Docker repository to your apt sources list, update and install the
``lxc-docker`` package.
.. code-block:: bash
sudo sh -c "echo deb http://get.docker.io/ubuntu docker main\
> /etc/apt/sources.list.d/docker.list"
sudo apt-get update
sudo apt-get install lxc-docker
Now verify that the installation has worked by downloading the ``ubuntu`` image
and launching a container.
.. code-block:: bash
# download the base 'ubuntu' container
# and run bash inside it while setting up an interactive shell
sudo docker run -i -t ubuntu /bin/bash
# type exit to exit
Type ``exit`` to exit
**Done!**, now continue with the :ref:`hello_world` example.
@@ -162,8 +175,8 @@ Verify it worked
Docker and UFW
^^^^^^^^^^^^^^
Docker uses a bridge to manage container networking. By default, UFW
drops all `forwarding`, thus a first step is to enable UFW forwarding:
Docker uses a bridge to manage container networking. By default, UFW drops all
`forwarding` traffic. As a result you will need to enable UFW forwarding:
.. code-block:: bash
@@ -181,11 +194,33 @@ Then reload UFW:
sudo ufw reload
UFW's default set of rules denied all `incoming`, so if you want to be
able to reach your containers from another host, you should allow
incoming connections on the docker port (default 4243):
UFW's default set of rules denies all `incoming` traffic. If you want to be
able to reach your containers from another host then you should allow
incoming connections on the Docker port (default 4243):
.. code-block:: bash
sudo ufw allow 4243/tcp
.. _installmirrors:
Mirrors
^^^^^^^
You should ``ping get.docker.io`` and compare the latency to the
following mirrors, and pick whichever one is best for you.
Yandex
------
`Yandex <http://yandex.ru/>`_ in Russia is mirroring the Docker Debian
packages, updating every 6 hours. Substitute
``http://mirror.yandex.ru/mirrors/docker/`` for
``http://get.docker.io/ubuntu`` in the instructions above. For example:
.. code-block:: bash
sudo sh -c "echo deb http://mirror.yandex.ru/mirrors/docker/ docker main\
> /etc/apt/sources.list.d/docker.list"
sudo apt-get update
sudo apt-get install lxc-docker

View File

@@ -0,0 +1,183 @@
:title: Link via an Ambassador Container
:description: Using the Ambassador pattern to abstract (network) services
:keywords: Examples, Usage, links, docker, documentation, examples, names, name, container naming
.. _ambassador_pattern_linking:
Link via an Ambassador Container
================================
Rather than hardcoding network links between a service consumer and provider, Docker
encourages service portability.
eg, instead of
.. code-block:: bash
(consumer) --> (redis)
requiring you to restart the ``consumer`` to attach it to a different ``redis`` service,
you can add ambassadors
.. code-block:: bash
(consumer) --> (redis-ambassador) --> (redis)
or
(consumer) --> (redis-ambassador) ---network---> (redis-ambassador) --> (redis)
When you need to rewire your consumer to talk to a different redis server, you
can just restart the ``redis-ambassador`` container that the consumer is connected to.
This pattern also allows you to transparently move the redis server to a different
docker host from the consumer.
Using the ``svendowideit/ambassador`` container, the link wiring is controlled entirely
from the ``docker run`` parameters.
Two-host Example
----------------
Start the actual redis server on one Docker host
.. code-block:: bash
big-server $ docker run -d -name redis crosbymichael/redis
Then add an ambassador linked to the redis server, mapping a port to the outside world
.. code-block:: bash
big-server $ docker run -d -link redis:redis -name redis_ambassador -p 6379:6379 svendowideit/ambassador
On the other host, you can set up another ambassador, setting environment variables for each remote port we want to proxy to the ``big-server``
.. code-block:: bash
client-server $ docker run -d -name redis_ambassador -expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador
Then on the ``client-server`` host, you can use a redis client container to talk
to the remote redis server, just by linking to the local redis ambassador.
.. code-block:: bash
client-server $ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
redis 172.17.0.160:6379> ping
PONG
How it works
------------
The following example shows what the ``svendowideit/ambassador`` container does
automatically (with a tiny amount of ``sed``)
On the docker host (192.168.1.52) that redis will run on:
.. code-block:: bash
# start actual redis server
$ docker run -d -name redis crosbymichael/redis
# get a redis-cli container for connection testing
$ docker pull relateiq/redis-cli
# test the redis server by talking to it directly
$ docker run -t -i -rm -link redis:redis relateiq/redis-cli
redis 172.17.0.136:6379> ping
PONG
^D
# add redis ambassador
$ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 busybox sh
In the ``redis_ambassador`` container, you can see the linked redis container's env
.. code-block:: bash
$ env
REDIS_PORT=tcp://172.17.0.136:6379
REDIS_PORT_6379_TCP_ADDR=172.17.0.136
REDIS_NAME=/redis_ambassador/redis
HOSTNAME=19d7adf4705e
REDIS_PORT_6379_TCP_PORT=6379
HOME=/
REDIS_PORT_6379_TCP_PROTO=tcp
container=lxc
REDIS_PORT_6379_TCP=tcp://172.17.0.136:6379
TERM=xterm
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
PWD=/
This environment is used by the ambassador socat script to expose redis to the world
(via the -p 6379:6379 port mapping)
.. code-block:: bash
$ docker rm redis_ambassador
$ sudo ./contrib/mkimage-unittest.sh
$ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 docker-ut sh
$ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:172.17.0.136:6379
Then ping the redis server via the ambassador
.. code-block:: bash
$ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
redis 172.17.0.160:6379> ping
PONG
Now go to a different server
.. code-block:: bash
$ sudo ./contrib/mkimage-unittest.sh
$ docker run -t -i -expose 6379 -name redis_ambassador docker-ut sh
$ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:192.168.1.52:6379
and get the redis-cli image so we can talk over the ambassador bridge
.. code-block:: bash
$ docker pull relateiq/redis-cli
$ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
redis 172.17.0.160:6379> ping
PONG
The svendowideit/ambassador Dockerfile
--------------------------------------
The ``svendowideit/ambassador`` image is a small busybox image with ``socat`` built in.
When you start the container, it uses a small ``sed`` script to parse out the (possibly multiple)
link environment variables to set up the port forwarding. On the remote host, you need to set the
variable using the ``-e`` command line option.
``-expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379`` will forward the
local ``1234`` port to the remote IP and port - in this case ``192.168.1.52:6379``.
::
#
#
# first you need to build the docker-ut image
# using ./contrib/mkimage-unittest.sh
# then
# docker build -t SvenDowideit/ambassador .
# docker tag SvenDowideit/ambassador ambassador
# then to run it (on the host that has the real backend on it)
# docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 ambassador
# on the remote host, you can set up another ambassador
# docker run -t -i -name redis_ambassador -expose 6379 sh
FROM docker-ut
MAINTAINER SvenDowideit@home.org.au
CMD env | grep _TCP= | sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' | sh && top

View File

@@ -1,10 +1,10 @@
:title: Base Image Creation
:title: Create a Base Image
:description: How to create base images
:keywords: Examples, Usage, base image, docker, documentation, examples
.. _base_image_creation:
Base Image Creation
Create a Base Image
===================
So you want to create your own :ref:`base_image_def`? Great!
@@ -37,7 +37,7 @@ There are more example scripts for creating base images in the
Docker Github Repo:
* `BusyBox <https://github.com/dotcloud/docker/blob/master/contrib/mkimage-busybox.sh>`_
* `CentOS
<https://github.com/dotcloud/docker/blob/master/contrib/mkimage-centos.sh>`_
* `Debian/Ubuntu
* `CentOS / Scientific Linux CERN (SLC)
<https://github.com/dotcloud/docker/blob/master/contrib/mkimage-rinse.sh>`_
* `Debian / Ubuntu
<https://github.com/dotcloud/docker/blob/master/contrib/mkimage-debootstrap.sh>`_

View File

@@ -1,10 +1,10 @@
:title: Basic Commands
:title: Learn Basic Commands
:description: Common usage and commands
:keywords: Examples, Usage, basic commands, docker, documentation, examples
The Basics
==========
Learn Basic Commands
====================
Starting Docker
---------------
@@ -67,7 +67,7 @@ daemon will make the ownership of the Unix socket read/writable by the
*docker* group when the daemon starts. The ``docker`` daemon must
always run as root, but if you run the ``docker`` client as a user in
the *docker* group then you don't need to add ``sudo`` to all the
client commands.
client commands. Warning: the *docker* group is root-equivalent.
**Example:**
@@ -76,11 +76,11 @@ client commands.
# Add the docker group if it doesn't already exist.
sudo groupadd docker
# Add the user "ubuntu" to the docker group.
# Add the connected user "${USERNAME}" to the docker group.
# Change the user name to match your preferred user.
# You may have to logout and log back in again for
# this to take effect.
sudo gpasswd -a ubuntu docker
sudo gpasswd -a ${USERNAME} docker
# Restart the docker daemon.
sudo service docker restart

View File

@@ -1,12 +1,12 @@
:title: Dockerfiles for Images
:title: Build Images (Dockerfile Reference)
:description: Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image.
:keywords: builder, docker, Dockerfile, automation, image creation
.. _dockerbuilder:
======================
Dockerfiles for Images
======================
===================================
Build Images (Dockerfile Reference)
===================================
**Docker can act as a builder** and read instructions from a text
``Dockerfile`` to automate the steps you would otherwise take manually
@@ -15,27 +15,39 @@ commit them along the way, giving you a final image.
.. contents:: Table of Contents
.. _dockerfile_usage:
1. Usage
========
To build an image from a source repository, create a description file
called ``Dockerfile`` at the root of your repository. This file will
describe the steps to assemble the image.
To :ref:`build <cli_build>` an image from a source repository, create
a description file called ``Dockerfile`` at the root of your
repository. This file will describe the steps to assemble the image.
Then call ``docker build`` with the path of your source repository as
argument:
argument (for example, ``.``):
``sudo docker build .``
The path to the source repository defines where to find the *context*
of the build. The build is run by the Docker daemon, not by the CLI,
so the whole context must be transferred to the daemon. The Docker CLI
reports "Uploading context" when the context is sent to the daemon.
You can specify a repository and tag at which to save the new image if the
build succeeds:
``sudo docker build -t shykes/myapp .``
Docker will run your steps one-by-one, committing the result if necessary,
before finally outputting the ID of your new image.
The Docker daemon will run your steps one-by-one, committing the
result if necessary, before finally outputting the ID of your new
image. The Docker daemon will automatically clean up the context you
sent.
When you're done with your build, you're ready to look into :ref:`image_push`.
When you're done with your build, you're ready to look into
:ref:`image_push`.
.. _dockerfile_format:
2. Format
=========
@@ -63,12 +75,16 @@ allows statements like:
# Comment
RUN echo 'we are running some # of cool things'
.. _dockerfile_instructions:
3. Instructions
===============
Here is the set of instructions you can use in a ``Dockerfile`` for
building images.
.. _dockerfile_from:
3.1 FROM
--------
@@ -94,6 +110,8 @@ output by the commit before each new ``FROM`` command.
If no ``tag`` is given to the ``FROM`` instruction, ``latest`` is
assumed. If the used tag does not exist, an error will be returned.
.. _dockerfile_maintainer:
3.2 MAINTAINER
--------------
@@ -102,6 +120,8 @@ assumed. If the used tag does not exist, an error will be returned.
The ``MAINTAINER`` instruction allows you to set the *Author* field of
the generated images.
.. _dockerfile_run:
3.3 RUN
-------
@@ -124,7 +144,7 @@ Known Issues (RUN)
``rm`` a file, for example. The issue describes a workaround.
* :issue:`2424` Locale will not be set automatically.
.. _dockerfile_cmd:
3.4 CMD
-------
@@ -169,7 +189,7 @@ array:
If you would like your container to run the same executable every
time, then you should consider using ``ENTRYPOINT`` in combination
with ``CMD``. See :ref:`entrypoint_def`.
with ``CMD``. See :ref:`dockerfile_entrypoint`.
If the user specifies arguments to ``docker run`` then they will
override the default specified in CMD.
@@ -179,6 +199,8 @@ override the default specified in CMD.
command and commits the result; ``CMD`` does not execute anything at
build time, but specifies the intended command for the image.
.. _dockerfile_expose:
3.5 EXPOSE
----------
@@ -189,6 +211,8 @@ functionally equivalent to running ``docker commit -run '{"PortSpecs":
["<port>", "<port2>"]}'`` outside the builder. Refer to
:ref:`port_redirection` for detailed information.
.. _dockerfile_env:
3.6 ENV
-------
@@ -203,6 +227,8 @@ with ``<key>=<value>``
The environment variables will persist when a container is run
from the resulting image.
.. _dockerfile_add:
3.7 ADD
-------
@@ -263,7 +289,7 @@ The copy obeys the following rules:
* If ``<dest>`` doesn't exist, it is created along with all missing
directories in its path.
.. _entrypoint_def:
.. _dockerfile_entrypoint:
3.8 ENTRYPOINT
--------------
@@ -312,6 +338,7 @@ this optional but default, you could use a CMD:
CMD ["-l", "-"]
ENTRYPOINT ["/usr/bin/wc"]
.. _dockerfile_volume:
3.9 VOLUME
----------
@@ -322,6 +349,8 @@ The ``VOLUME`` instruction will create a mount point with the specified name and
as holding externally mounted volumes from native host or other containers. For more information/examples
and mounting instructions via docker client, refer to :ref:`volume_def` documentation.
.. _dockerfile_user:
3.10 USER
---------
@@ -330,6 +359,8 @@ and mounting instructions via docker client, refer to :ref:`volume_def` document
The ``USER`` instruction sets the username or UID to use when running
the image.
.. _dockerfile_workdir:
3.11 WORKDIR
------------
@@ -338,6 +369,7 @@ the image.
The ``WORKDIR`` instruction sets the working directory in which
the command given by ``CMD`` is executed.
.. _dockerfile_examples:
4. Dockerfile Examples
======================

View File

@@ -1,11 +1,11 @@
:title: Host Integration
:title: Automatically Start Containers
:description: How to generate scripts for upstart, systemd, etc.
:keywords: systemd, upstart, supervisor, docker, documentation, host integration
Host Integration
================
Automatically Start Containers
==============================
You can use your Docker containers with process managers like ``upstart``,
``systemd`` and ``supervisor``.
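As a hypothetical sketch only (the job file, container name and image are placeholders, not part of this documentation), an upstart job that keeps a container running could look like:
.. code-block:: bash
# /etc/init/redis-container.conf (hypothetical)
description "Redis container"
start on filesystem
stop on runlevel [!2345]
respawn
# -rm removes the stopped container so the name can be reused on respawn
exec /usr/bin/docker run -rm -name redis_server crosbymichael/redis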

View File

@@ -17,7 +17,9 @@ Contents:
workingwithrepository
baseimages
port_redirection
puppet
networking
host_integration
working_with_volumes
working_with_links_names
ambassador_pattern_linking
puppet

View File

@@ -0,0 +1,153 @@
:title: Configure Networking
:description: Docker networking
:keywords: network, networking, bridge, docker, documentation
Configure Networking
====================
Docker uses Linux bridge capabilities to provide network connectivity
to containers. The ``docker0`` bridge interface is managed by Docker
itself for this purpose. Thus, when the Docker daemon starts, it:
- creates the ``docker0`` bridge if not present
- searches for an IP address range which doesn't overlap with an existing route
- picks an IP in the selected range
- assigns this IP to the ``docker0`` bridge
.. code-block:: bash
# List host bridges
$ sudo brctl show
bridge name bridge id STP enabled interfaces
docker0 8000.000000000000 no
# Show docker0 IP address
$ sudo ifconfig docker0
docker0 Link encap:Ethernet HWaddr xx:xx:xx:xx:xx:xx
inet addr:172.17.42.1 Bcast:0.0.0.0 Mask:255.255.0.0
At runtime, a :ref:`specific kind of virtual
interface<vethxxxx-device>` is given to each container, which is then
attached to the ``docker0`` bridge. Each container also receives a
dedicated IP address from the same range as ``docker0``. The
``docker0`` IP address is then used as the default gateway for the
containers.
.. code-block:: bash
# Run a container
$ sudo docker run -t -i -d base /bin/bash
52f811c5d3d69edddefc75aff5a4525fc8ba8bcfa1818132f9dc7d4f7c7e78b4
$ sudo brctl show
bridge name bridge id STP enabled interfaces
docker0 8000.fef213db5a66 no vethQCDY1N
Above, ``docker0`` acts as a bridge for the ``vethQCDY1N`` interface
which is dedicated to the 52f811c5d3d6 container.
How to use a specific IP address range
---------------------------------------
Docker will try hard to find an IP range which is not used by the
host. Even though it works in most cases, it's not bullet-proof and
sometimes you need more control over the IP addressing scheme.
For this purpose, Docker allows you to manage the ``docker0`` bridge
or your own one using the ``-b=<bridgename>`` parameter.
In this scenario:
- ensure Docker is stopped
- create your own bridge (``bridge0`` for example)
- assign a specific IP to this bridge
- start Docker with the ``-b=bridge0`` parameter
.. code-block:: bash
# Stop Docker
$ sudo service docker stop
# Clean docker0 bridge and
# add your very own bridge0
$ sudo ifconfig docker0 down
$ sudo brctl addbr bridge0
$ sudo ifconfig bridge0 192.168.227.1 netmask 255.255.255.0
# Edit your Docker startup file
$ echo "DOCKER_OPTS=\"-b=bridge0\"" /etc/default/docker
# Start Docker
$ sudo service docker start
# Ensure bridge0 IP is not changed by Docker
$ sudo ifconfig bridge0
bridge0 Link encap:Ethernet HWaddr xx:xx:xx:xx:xx:xx
inet addr:192.168.227.1 Bcast:192.168.227.255 Mask:255.255.255.0
# Run a container
$ docker run -i -t base /bin/bash
# Container IP in the 192.168.227/24 range
root@261c272cd7d5:/# ifconfig eth0
eth0 Link encap:Ethernet HWaddr xx:xx:xx:xx:xx:xx
inet addr:192.168.227.5 Bcast:192.168.227.255 Mask:255.255.255.0
# bridge0 IP as the default gateway
root@261c272cd7d5:/# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 192.168.227.1 0.0.0.0 UG 0 0 0 eth0
192.168.227.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
# hit CTRL+P then CTRL+Q to detach
# Display bridge info
$ sudo brctl show
bridge name bridge id STP enabled interfaces
bridge0 8000.fe7c2e0faebd no vethAQI2QT
Container intercommunication
-------------------------------
Containers can communicate with each other according to the ``icc``
parameter value of the Docker daemon.
- The default, ``-icc=true`` allows containers to communicate with each other.
- ``-icc=false`` means containers are isolated from each other.
Under the hood, ``iptables`` is used by Docker to either accept or
drop communication between containers.
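You can inspect those rules on the host; a minimal sketch:
.. code-block:: bash
# list the FORWARD chain where Docker inserts its ACCEPT/DROP rules
sudo iptables -L FORWARD -n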
.. _vethxxxx-device:
What about the vethXXXX device?
-----------------------------------
Well. Things get complicated here.
The ``vethXXXX`` interface is the host side of a point-to-point link
between the host and the corresponding container; the other side of
the link is the container's ``eth0`` interface. This pair (host
``vethXXXX`` and container ``eth0``) is connected like a tube:
everything that comes in one side will come out the other side.
All the plumbing is delegated to Linux network capabilities (see the
``ip link`` command) and the namespaces infrastructure.
I want more
------------
Jérôme Petazzoni has created ``pipework`` to connect
containers in arbitrarily complex scenarios:
https://github.com/jpetazzo/pipework

View File

@@ -1,12 +1,12 @@
:title: Port redirection
:title: Redirect Ports
:description: usage about port redirection
:keywords: Usage, basic port, docker, documentation, examples
.. _port_redirection:
Port redirection
================
Redirect Ports
==============
Interacting with a service is commonly done through a connection to a
port. When this service runs inside a container, one can connect to

View File

@@ -1,15 +1,16 @@
:title: Working with Links and Names
:description: How to create and use links and names
:keywords: Examples, Usage, links, docker, documentation, examples, names, name, container naming
:title: Link Containers
:description: How to create and use both links and names
:keywords: Examples, Usage, links, linking, docker, documentation, examples, names, name, container naming
.. _working_with_links_names:
Working with Links and Names
============================
Link Containers
===============
From version 0.6.5 you are now able to ``name`` a container and ``link`` it to another
container by referring to its name. This will create a parent -> child relationship
where the parent container can see selected information about its child.
From version 0.6.5 you are now able to ``name`` a container and
``link`` it to another container by referring to its name. This will
create a parent -> child relationship where the parent container can
see selected information about its child.
.. _run_name:
@@ -18,8 +19,9 @@ Container Naming
.. versionadded:: v0.6.5
You can now name your container by using the ``-name`` flag. If no name is provided, Docker
will automatically generate a name. You can see this name using the ``docker ps`` command.
You can now name your container by using the ``-name`` flag. If no
name is provided, Docker will automatically generate a name. You can
see this name using the ``docker ps`` command.
.. code-block:: bash
@@ -38,47 +40,53 @@ Links: service discovery for docker
.. versionadded:: v0.6.5
Links allow containers to discover and securely communicate with each other by using the
flag ``-link name:alias``. Inter-container communication can be disabled with the daemon
flag ``-icc=false``. With this flag set to false, Container A cannot access Container B
unless explicitly allowed via a link. This is a huge win for securing your containers.
When two containers are linked together Docker creates a parent child relationship
between the containers. The parent container will be able to access information via
environment variables of the child such as name, exposed ports, IP and other selected
environment variables.
Links allow containers to discover and securely communicate with each
other by using the flag ``-link name:alias``. Inter-container
communication can be disabled with the daemon flag
``-icc=false``. With this flag set to ``false``, Container A cannot
access Container B unless explicitly allowed via a link. This is a
huge win for securing your containers. When two containers are linked
together Docker creates a parent child relationship between the
containers. The parent container will be able to access information
via environment variables of the child such as name, exposed ports, IP
and other selected environment variables.
When linking two containers Docker will use the exposed ports of the container to create
a secure tunnel for the parent to access. If a database container only exposes port 8080
then the linked container will only be allowed to access port 8080 and nothing else if
When linking two containers Docker will use the exposed ports of the
container to create a secure tunnel for the parent to access. If a
database container only exposes port 8080 then the linked container
will only be allowed to access port 8080 and nothing else if
inter-container communication is set to false.
.. code-block:: bash
# Example: there is an image called redis-2.6 that exposes the port 6379 and starts redis-server.
# Let's name the container as "redis" based on that image and run it as daemon.
$ sudo docker run -d -name redis redis-2.6
We can issue all the commands that you would expect using the name "redis"; start, stop,
attach, using the name for our container. The name also allows us to link other containers
into this one.
Next, we can start a new web application that has a dependency on Redis and apply a link
to connect both containers. If you noticed when running our Redis server we did not use
the -p flag to publish the Redis port to the host system. Redis exposed port 6379 and
this is all we need to establish a link.
For example, there is an image called ``crosbymichael/redis`` that exposes the
port 6379 and starts the Redis server. Let's name the container as ``redis``
based on that image and run it as a daemon.
.. code-block:: bash
$ sudo docker run -d -name redis crosbymichael/redis
We can issue all the commands that you would expect using the name
``redis``; start, stop, attach, using the name for our container. The
name also allows us to link other containers into this one.
Next, we can start a new web application that has a dependency on
Redis and apply a link to connect both containers. If you noticed when
running our Redis server we did not use the ``-p`` flag to publish the
Redis port to the host system. Redis exposed port 6379 and this is all
we need to establish a link.
.. code-block:: bash
# Linking the redis container as a child
$ sudo docker run -t -i -link redis:db -name webapp ubuntu bash
When you specified -link redis:db you are telling docker to link the container named redis
into this new container with the alias db. Environment variables are prefixed with the alias
so that the parent container can access network and environment information from the containers
that are linked into it.
When you specified ``-link redis:db`` you are telling Docker to link
the container named ``redis`` into this new container with the alias
``db``. Environment variables are prefixed with the alias so that the
parent container can access network and environment information from
the containers that are linked into it.
If we inspect the environment variables of the second container, we would see all the information
about the child container.
If we inspect the environment variables of the second container, we
would see all the information about the child container.
.. code-block:: bash
@@ -100,5 +108,17 @@ about the child container.
_=/usr/bin/env
root@4c01db0b339c:/#
Accessing the network information along with the environment of the child container allows
us to easily connect to the Redis service on the specific IP and port in the environment.
Accessing the network information along with the environment of the
child container allows us to easily connect to the Redis service on
the specific IP and port in the environment.
Running ``docker ps`` shows the 2 containers, and the ``webapp/db``
alias name for the redis container.
.. code-block:: bash
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds webapp
d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db

View File

@@ -1,11 +1,11 @@
:title: Working with Volumes
:title: Share Directories via Volumes
:description: How to create and share volumes
:keywords: Examples, Usage, volume, docker, documentation, examples
.. _volume_def:
Data Volume
===========
Share Directories via Volumes
=============================
.. versionadded:: v0.3.0
Data volumes have been available since version 1 of the
@@ -46,7 +46,7 @@ volumes to any container created from the image::
Mount Volumes from an Existing Container:
-----------------------------------------
The command below creates a new container which is runnning as daemon
The command below creates a new container which is running as daemon
``-d`` and with one volume ``/var/lib/couchdb``::
COUCH1=$(sudo docker run -d -v /var/lib/couchdb shykes/couchdb:2013-05-03)
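A second container can then mount those volumes with the ``-volumes-from`` flag; a minimal sketch reusing the same image::
COUCH2=$(sudo docker run -d -volumes-from $COUCH1 shykes/couchdb:2013-05-03)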

View File

@@ -1,11 +1,11 @@
:title: Working With Repositories
:title: Share Images via Repositories
:description: Repositories allow users to share images.
:keywords: repo, repositories, usage, pull image, push image, image, documentation
.. _working_with_the_repository:
Working with Repositories
=========================
Share Images via Repositories
=============================
A *repository* is a hosted collection of tagged :ref:`images
<image_def>` that together create the file system for a container. The
@@ -152,6 +152,41 @@ or tag.
.. _using_private_repositories:
Trusted Builds
--------------
Trusted Builds automate the building and updating of images from GitHub, directly
on docker.io servers. It works by adding a commit hook to your selected repository,
triggering a build and update when you push a commit.
To set up a Trusted Build
+++++++++++++++++++++++++
#. Create a `Docker Index account <https://index.docker.io/>`_ and login.
#. Link your GitHub account through the ``Link Accounts`` menu.
#. `Configure a Trusted build <https://index.docker.io/builds/>`_.
#. Pick a GitHub project that has a ``Dockerfile`` that you want to build.
#. Pick the branch you want to build (the default is the ``master`` branch).
#. Give the Trusted Build a name.
#. Assign an optional Docker tag to the Build.
#. Specify where the ``Dockerfile`` is located. The default is ``/``.
Once the Trusted Build is configured it will automatically trigger a build and,
in a few minutes, if there are no errors, you will see your new Trusted Build
on the Docker Index. It will stay in sync with your GitHub repo until you
deactivate the Trusted Build.
To check the status of your Trusted Builds, go to your
`Trusted Builds page <https://index.docker.io/builds/>`_ on the Docker Index;
it shows the status of each build along with its build history.
Once you've created a Trusted Build you can deactivate or delete it. You
cannot, however, push to a Trusted Build with the ``docker push`` command;
you can only update it by committing code to your GitHub repository.
You can create multiple Trusted Builds per repository and configure them to
point to specific ``Dockerfile`` locations or Git branches.
Private Repositories
--------------------

View File

@@ -35,7 +35,7 @@
%}
{#
This part is hopefully complex because things like |cut '/index/' are not available in spinx jinja
This part is hopefully complex because things like |cut '/index/' are not available in Sphinx jinja
and will make it crash. (and we need index/ out.)
#}
<link rel="canonical" href="http://docs.docker.io/en/latest/

View File

@@ -410,3 +410,23 @@ dt:hover > a.headerlink {
.admonition.seealso {
border-color: #23cb1f;
}
.versionchanged,
.versionadded,
.versionmodified,
.deprecated {
font-size: larger;
font-weight: bold;
}
.versionchanged {
color: lightseagreen;
}
.versionadded {
color: mediumblue;
}
.deprecated {
color: orangered;
}

View File

@@ -1 +1 @@
Solomon Hykes <solomon@dotcloud.com>
#Solomon Hykes <solomon@dotcloud.com> Temporarily unavailable

View File

@@ -3,13 +3,14 @@ package engine
import (
"fmt"
"github.com/dotcloud/docker/utils"
"io"
"log"
"os"
"runtime"
"strings"
)
type Handler func(*Job) string
type Handler func(*Job) Status
var globalHandlers map[string]Handler
@@ -34,6 +35,9 @@ type Engine struct {
handlers map[string]Handler
hack Hack // data for temporary hackery (see hack.go)
id string
Stdout io.Writer
Stderr io.Writer
Stdin io.Reader
}
func (eng *Engine) Root() string {
@@ -70,7 +74,9 @@ func New(root string) (*Engine, error) {
log.Printf("WARNING: %s\n", err)
} else {
if utils.CompareKernelVersion(k, &utils.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
}
}
}
if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) {
@@ -80,6 +86,9 @@ func New(root string) (*Engine, error) {
root: root,
handlers: make(map[string]Handler),
id: utils.RandomString(),
Stdout: os.Stdout,
Stderr: os.Stderr,
Stdin: os.Stdin,
}
// Copy existing global handlers
for k, v := range globalHandlers {
@@ -99,10 +108,12 @@ func (eng *Engine) Job(name string, args ...string) *Job {
Eng: eng,
Name: name,
Args: args,
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
Stdin: NewInput(),
Stdout: NewOutput(),
Stderr: NewOutput(),
env: &Env{},
}
job.Stderr.Add(utils.NopWriteCloser(eng.Stderr))
handler, exists := eng.handlers[name]
if exists {
job.handler = handler
@@ -112,5 +123,5 @@ func (eng *Engine) Job(name string, args ...string) *Job {
func (eng *Engine) Logf(format string, args ...interface{}) (n int, err error) {
prefixedFormat := fmt.Sprintf("[%s] %s\n", eng, strings.TrimRight(format, "\n"))
return fmt.Fprintf(os.Stderr, prefixedFormat, args...)
return fmt.Fprintf(eng.Stderr, prefixedFormat, args...)
}
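A minimal sketch of what the new pluggable streams enable: pointing ``eng.Stderr`` at a buffer so ``Logf`` output can be captured instead of going to the process stderr (the root path is arbitrary):

package main

import (
	"bytes"
	"fmt"

	"github.com/dotcloud/docker/engine"
)

func main() {
	eng, err := engine.New("/tmp/docker-engine-demo")
	if err != nil {
		panic(err)
	}
	// Logf now writes to eng.Stderr instead of the hard-coded os.Stderr,
	// so tests and embedders can capture engine logs.
	var logs bytes.Buffer
	eng.Stderr = &logs
	eng.Logf("engine initialized at %s", eng.Root())
	fmt.Print(logs.String())
}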

103
engine/engine_test.go Normal file
View File

@@ -0,0 +1,103 @@
package engine
import (
"io/ioutil"
"os"
"path"
"testing"
)
func TestRegister(t *testing.T) {
if err := Register("dummy1", nil); err != nil {
t.Fatal(err)
}
if err := Register("dummy1", nil); err == nil {
t.Fatalf("Expecting error, got none")
}
eng := newTestEngine(t)
// Should fail because global handlers are copied
// at engine creation
if err := eng.Register("dummy1", nil); err == nil {
t.Fatalf("Expecting error, got none")
}
if err := eng.Register("dummy2", nil); err != nil {
t.Fatal(err)
}
if err := eng.Register("dummy2", nil); err == nil {
t.Fatalf("Expecting error, got none")
}
}
func TestJob(t *testing.T) {
eng := newTestEngine(t)
job1 := eng.Job("dummy1", "--level=awesome")
if job1.handler != nil {
t.Fatalf("job1.handler should be empty")
}
h := func(j *Job) Status {
j.Printf("%s\n", j.Name)
return 42
}
eng.Register("dummy2", h)
job2 := eng.Job("dummy2", "--level=awesome")
if job2.handler == nil {
t.Fatalf("job2.handler shouldn't be nil")
}
if job2.handler(job2) != 42 {
t.Fatalf("handler dummy2 was not found in job2")
}
}
func TestEngineRoot(t *testing.T) {
tmp, err := ioutil.TempDir("", "docker-test-TestEngineCreateDir")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
dir := path.Join(tmp, "dir")
eng, err := New(dir)
if err != nil {
t.Fatal(err)
}
if st, err := os.Stat(dir); err != nil {
t.Fatal(err)
} else if !st.IsDir() {
t.Fatalf("engine.New() created something other than a directory at %s", dir)
}
if r := eng.Root(); r != dir {
t.Fatalf("Expected: %v\nReceived: %v", dir, r)
}
}
func TestEngineString(t *testing.T) {
eng1 := newTestEngine(t)
defer os.RemoveAll(eng1.Root())
eng2 := newTestEngine(t)
defer os.RemoveAll(eng2.Root())
s1 := eng1.String()
s2 := eng2.String()
if eng1 == eng2 {
t.Fatalf("Different engines should have different names (%v == %v)", s1, s2)
}
}
func TestEngineLogf(t *testing.T) {
eng := newTestEngine(t)
defer os.RemoveAll(eng.Root())
input := "Test log line"
if n, err := eng.Logf("%s\n", input); err != nil {
t.Fatal(err)
} else if n < len(input) {
t.Fatalf("Test: Logf() should print at least as much as the input\ninput=%d\nprinted=%d", len(input), n)
}
}

234
engine/env.go Normal file
View File

@@ -0,0 +1,234 @@
package engine
import (
"bytes"
"encoding/json"
"fmt"
"io"
"strconv"
"strings"
)
type Env []string
func (env *Env) Get(key string) (value string) {
// FIXME: use Map()
for _, kv := range *env {
if strings.Index(kv, "=") == -1 {
continue
}
parts := strings.SplitN(kv, "=", 2)
if parts[0] != key {
continue
}
if len(parts) < 2 {
value = ""
} else {
value = parts[1]
}
}
return
}
func (env *Env) Exists(key string) bool {
_, exists := env.Map()[key]
return exists
}
func (env *Env) GetBool(key string) (value bool) {
s := strings.ToLower(strings.Trim(env.Get(key), " \t"))
if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
return false
}
return true
}
func (env *Env) SetBool(key string, value bool) {
if value {
env.Set(key, "1")
} else {
env.Set(key, "0")
}
}
func (env *Env) GetInt(key string) int {
return int(env.GetInt64(key))
}
func (env *Env) GetInt64(key string) int64 {
s := strings.Trim(env.Get(key), " \t")
val, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return -1
}
return val
}
func (env *Env) SetInt(key string, value int) {
env.Set(key, fmt.Sprintf("%d", value))
}
func (env *Env) SetInt64(key string, value int64) {
env.Set(key, fmt.Sprintf("%d", value))
}
// Returns nil if key not found
func (env *Env) GetList(key string) []string {
sval := env.Get(key)
if sval == "" {
return nil
}
l := make([]string, 0, 1)
if err := json.Unmarshal([]byte(sval), &l); err != nil {
l = append(l, sval)
}
return l
}
func (env *Env) GetJson(key string, iface interface{}) error {
sval := env.Get(key)
if sval == "" {
return nil
}
return json.Unmarshal([]byte(sval), iface)
}
func (env *Env) SetJson(key string, value interface{}) error {
sval, err := json.Marshal(value)
if err != nil {
return err
}
env.Set(key, string(sval))
return nil
}
func (env *Env) SetList(key string, value []string) error {
return env.SetJson(key, value)
}
func (env *Env) Set(key, value string) {
*env = append(*env, key+"="+value)
}
func NewDecoder(src io.Reader) *Decoder {
return &Decoder{
json.NewDecoder(src),
}
}
type Decoder struct {
*json.Decoder
}
func (decoder *Decoder) Decode() (*Env, error) {
m := make(map[string]interface{})
if err := decoder.Decoder.Decode(&m); err != nil {
return nil, err
}
env := &Env{}
for key, value := range m {
env.SetAuto(key, value)
}
return env, nil
}
// DecodeEnv decodes `src` as a json dictionary, and adds
// each decoded key-value pair to the environment.
//
// If `src` cannot be decoded as a json dictionary, an error
// is returned.
func (env *Env) Decode(src io.Reader) error {
m := make(map[string]interface{})
if err := json.NewDecoder(src).Decode(&m); err != nil {
return err
}
for k, v := range m {
env.SetAuto(k, v)
}
return nil
}
func (env *Env) SetAuto(k string, v interface{}) {
// FIXME: we fix-convert float values to int, because
// encoding/json decodes integers to float64, but cannot encode them back.
// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
if fval, ok := v.(float64); ok {
env.SetInt64(k, int64(fval))
} else if sval, ok := v.(string); ok {
env.Set(k, sval)
} else if val, err := json.Marshal(v); err == nil {
env.Set(k, string(val))
} else {
env.Set(k, fmt.Sprintf("%v", v))
}
}
func (env *Env) Encode(dst io.Writer) error {
m := make(map[string]interface{})
for k, v := range env.Map() {
var val interface{}
if err := json.Unmarshal([]byte(v), &val); err == nil {
// FIXME: we fix-convert float values to int, because
// encoding/json decodes integers to float64, but cannot encode them back.
// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
if fval, isFloat := val.(float64); isFloat {
val = int(fval)
}
m[k] = val
} else {
m[k] = v
}
}
if err := json.NewEncoder(dst).Encode(&m); err != nil {
return err
}
return nil
}
func (env *Env) WriteTo(dst io.Writer) (n int64, err error) {
// FIXME: return the number of bytes written to respect io.WriterTo
return 0, env.Encode(dst)
}
func (env *Env) Export(dst interface{}) (err error) {
defer func() {
if err != nil {
err = fmt.Errorf("ExportEnv %s", err)
}
}()
var buf bytes.Buffer
// step 1: encode/marshal the env to an intermediary json representation
if err := env.Encode(&buf); err != nil {
return err
}
// step 2: decode/unmarshal the intermediary json into the destination object
if err := json.NewDecoder(&buf).Decode(dst); err != nil {
return err
}
return nil
}
func (env *Env) Import(src interface{}) (err error) {
defer func() {
if err != nil {
err = fmt.Errorf("ImportEnv: %s", err)
}
}()
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(src); err != nil {
return err
}
if err := env.Decode(&buf); err != nil {
return err
}
return nil
}
func (env *Env) Map() map[string]string {
m := make(map[string]string)
for _, kv := range *env {
parts := strings.SplitN(kv, "=", 2)
m[parts[0]] = parts[1]
}
return m
}
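A short usage sketch for the ``Env`` helpers above (typed values round-trip through the string-backed environment; missing keys come back as ``false``, ``-1``, or ``nil`` respectively):

package main

import (
	"fmt"

	"github.com/dotcloud/docker/engine"
)

func main() {
	env := &engine.Env{}
	env.Set("name", "db")
	env.SetInt("port", 6379)
	env.SetList("tags", []string{"redis", "cache"})

	fmt.Println(env.Get("name"))     // db
	fmt.Println(env.GetInt("port"))  // 6379
	fmt.Println(env.GetList("tags")) // [redis cache]
	fmt.Println(env.GetBool("nope")) // false
}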

View File

@@ -23,7 +23,101 @@ func TestSetenv(t *testing.T) {
if val := job.Getenv("foo"); val != "bar" {
t.Fatalf("Getenv returns incorrect value: %s", val)
}
job.Setenv("bar", "")
if val := job.Getenv("bar"); val != "" {
t.Fatalf("Getenv returns incorrect value: %s", val)
}
if val := job.Getenv("nonexistent"); val != "" {
t.Fatalf("Getenv returns incorrect value: %s", val)
}
}
func TestSetenvBool(t *testing.T) {
job := mkJob(t, "dummy")
job.SetenvBool("foo", true)
if val := job.GetenvBool("foo"); !val {
t.Fatalf("GetenvBool returns incorrect value: %t", val)
}
job.SetenvBool("bar", false)
if val := job.GetenvBool("bar"); val {
t.Fatalf("GetenvBool returns incorrect value: %t", val)
}
if val := job.GetenvBool("nonexistent"); val {
t.Fatalf("GetenvBool returns incorrect value: %t", val)
}
}
func TestSetenvInt(t *testing.T) {
job := mkJob(t, "dummy")
job.SetenvInt("foo", -42)
if val := job.GetenvInt("foo"); val != -42 {
t.Fatalf("GetenvInt returns incorrect value: %d", val)
}
job.SetenvInt("bar", 42)
if val := job.GetenvInt("bar"); val != 42 {
t.Fatalf("GetenvInt returns incorrect value: %d", val)
}
if val := job.GetenvInt("nonexistent"); val != -1 {
t.Fatalf("GetenvInt returns incorrect value: %d", val)
}
}
func TestSetenvList(t *testing.T) {
job := mkJob(t, "dummy")
job.SetenvList("foo", []string{"bar"})
if val := job.GetenvList("foo"); len(val) != 1 || val[0] != "bar" {
t.Fatalf("GetenvList returns incorrect value: %v", val)
}
job.SetenvList("bar", nil)
if val := job.GetenvList("bar"); val != nil {
t.Fatalf("GetenvList returns incorrect value: %v", val)
}
if val := job.GetenvList("nonexistent"); val != nil {
t.Fatalf("GetenvList returns incorrect value: %v", val)
}
}
func TestImportEnv(t *testing.T) {
type dummy struct {
DummyInt int
DummyStringArray []string
}
job := mkJob(t, "dummy")
if err := job.ImportEnv(&dummy{42, []string{"foo", "bar"}}); err != nil {
t.Fatal(err)
}
dmy := dummy{}
if err := job.ExportEnv(&dmy); err != nil {
t.Fatal(err)
}
if dmy.DummyInt != 42 {
t.Fatalf("Expected 42, got %d", dmy.DummyInt)
}
if len(dmy.DummyStringArray) != 2 || dmy.DummyStringArray[0] != "foo" || dmy.DummyStringArray[1] != "bar" {
t.Fatalf("Expected {foo, bar}, got %v", dmy.DummyStringArray)
}
}
func TestEnviron(t *testing.T) {
job := mkJob(t, "dummy")
job.Setenv("foo", "bar")
val, exists := job.Environ()["foo"]
if !exists {
t.Fatalf("foo not found in the environ")
}
if val != "bar" {
t.Fatalf("bar not found in the environ")
}
}

24
engine/helpers_test.go Normal file
View File

@@ -0,0 +1,24 @@
package engine
import (
"github.com/dotcloud/docker/utils"
"testing"
)
var globalTestID string
func newTestEngine(t *testing.T) *Engine {
tmp, err := utils.TestDirectory("")
if err != nil {
t.Fatal(err)
}
eng, err := New(tmp)
if err != nil {
t.Fatal(err)
}
return eng
}
func mkJob(t *testing.T, name string, args ...string) *Job {
return newTestEngine(t).Job(name, args...)
}

40
engine/http.go Normal file
View File

@@ -0,0 +1,40 @@
package engine
import (
"path"
"net/http"
)
// ServeHTTP executes a job as specified by the http request `r`, and sends the
// result as an http response.
// This method allows an Engine instance to be passed as a standard http.Handler interface.
//
// Note that the protocol used in this method is a convenience wrapper and is not the canonical
// implementation of remote job execution. This is because HTTP/1 does not handle stream multiplexing,
// and so cannot differentiate stdout from stderr. Additionally, headers cannot be added to a response
// once data has been written to the body, which makes it inconvenient to return metadata such
// as the exit status.
//
func (eng *Engine) ServeHTTP(w http.ResponseWriter, r *http.Request) {
jobName := path.Base(r.URL.Path)
jobArgs, exists := r.URL.Query()["a"]
if !exists {
jobArgs = []string{}
}
w.Header().Set("Job-Name", jobName)
for _, arg := range(jobArgs) {
w.Header().Add("Job-Args", arg)
}
job := eng.Job(jobName, jobArgs...)
job.Stdout.Add(w)
job.Stderr.Add(w)
// FIXME: distinguish job status from engine error in Run()
// The former should be passed as a special header, the latter
// should cause a 500 status
w.WriteHeader(http.StatusOK)
// The exit status cannot be sent reliably with HTTP1, because headers
// can only be sent before the body.
// (we could possibly use http footers via chunked encoding, but I couldn't find
// how to use them in net/http)
job.Run()
}
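Because ``*Engine`` now satisfies ``http.Handler``, exposing jobs over HTTP is a one-liner; a sketch (the port is arbitrary):

package main

import (
	"net/http"

	"github.com/dotcloud/docker/engine"
)

func main() {
	eng, err := engine.New("/tmp/docker-engine-http")
	if err != nil {
		panic(err)
	}
	// GET /<job>?a=<arg1>&a=<arg2> runs the named job and streams its
	// stdout and stderr, interleaved, as the response body.
	http.ListenAndServe(":4243", eng)
}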

View File

@@ -1,16 +1,10 @@
package engine
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"strconv"
"strings"
"sync"
"time"
)
// A job is the fundamental unit of work in the docker engine.
@@ -30,127 +24,76 @@ type Job struct {
Eng *Engine
Name string
Args []string
env []string
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
handler func(*Job) string
status string
env *Env
Stdout *Output
Stderr *Output
Stdin *Input
handler Handler
status Status
end time.Time
onExit []func()
}
type Status int
const (
StatusOK Status = 0
StatusErr Status = 1
StatusNotFound Status = 127
)
// Run executes the job and blocks until the job completes.
// If the job returns a failure status, an error is returned
// which includes the status.
func (job *Job) Run() error {
defer func() {
var wg sync.WaitGroup
for _, f := range job.onExit {
wg.Add(1)
go func(f func()) {
f()
wg.Done()
}(f)
}
wg.Wait()
}()
if job.Stdout != nil && job.Stdout != os.Stdout {
job.Stdout = io.MultiWriter(job.Stdout, os.Stdout)
}
if job.Stderr != nil && job.Stderr != os.Stderr {
job.Stderr = io.MultiWriter(job.Stderr, os.Stderr)
// FIXME: make this thread-safe
// FIXME: implement wait
if !job.end.IsZero() {
return fmt.Errorf("%s: job has already completed", job.Name)
}
// Log beginning and end of the job
job.Eng.Logf("+job %s", job.CallString())
defer func() {
job.Eng.Logf("-job %s%s", job.CallString(), job.StatusString())
}()
var errorMessage string
job.Stderr.AddString(&errorMessage)
if job.handler == nil {
job.status = "command not found"
job.Errorf("%s: command not found", job.Name)
job.status = 127
} else {
job.status = job.handler(job)
job.end = time.Now()
}
if job.status != "0" {
return fmt.Errorf("%s: %s", job.Name, job.status)
// Wait for all background tasks to complete
if err := job.Stdout.Close(); err != nil {
return err
}
if err := job.Stderr.Close(); err != nil {
return err
}
if job.status != 0 {
return fmt.Errorf("%s: %s", job.Name, errorMessage)
}
return nil
}
func (job *Job) StdoutParseLines(dst *[]string, limit int) {
job.parseLines(job.StdoutPipe(), dst, limit)
}
func (job *Job) StderrParseLines(dst *[]string, limit int) {
job.parseLines(job.StderrPipe(), dst, limit)
}
func (job *Job) parseLines(src io.Reader, dst *[]string, limit int) {
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
scanner := bufio.NewScanner(src)
for scanner.Scan() {
// If the limit is reached, flush the rest of the source and return
if limit > 0 && len(*dst) >= limit {
io.Copy(ioutil.Discard, src)
return
}
line := scanner.Text()
// Append the line (with delimiter removed)
*dst = append(*dst, line)
}
}()
job.onExit = append(job.onExit, wg.Wait)
}
func (job *Job) StdoutParseString(dst *string) {
lines := make([]string, 0, 1)
job.StdoutParseLines(&lines, 1)
job.onExit = append(job.onExit, func() {
if len(lines) >= 1 {
*dst = lines[0]
}
})
}
func (job *Job) StderrParseString(dst *string) {
lines := make([]string, 0, 1)
job.StderrParseLines(&lines, 1)
job.onExit = append(job.onExit, func() { *dst = lines[0] })
}
func (job *Job) StdoutPipe() io.ReadCloser {
r, w := io.Pipe()
job.Stdout = w
job.onExit = append(job.onExit, func() { w.Close() })
return r
}
func (job *Job) StderrPipe() io.ReadCloser {
r, w := io.Pipe()
job.Stderr = w
job.onExit = append(job.onExit, func() { w.Close() })
return r
}
func (job *Job) CallString() string {
return fmt.Sprintf("%s(%s)", job.Name, strings.Join(job.Args, ", "))
}
func (job *Job) StatusString() string {
// FIXME: if a job returns the empty string, it will be printed
// as not having returned.
// (this only affects String which is a convenience function).
if job.status != "" {
var okerr string
if job.status == "0" {
okerr = "OK"
} else {
okerr = "ERR"
}
return fmt.Sprintf(" = %s (%s)", okerr, job.status)
// If the job hasn't completed, status string is empty
if job.end.IsZero() {
return ""
}
return ""
var okerr string
if job.status == StatusOK {
okerr = "OK"
} else {
okerr = "ERR"
}
return fmt.Sprintf(" = %s (%d)", okerr, job.status)
}
// String returns a human-readable description of `job`
@@ -159,168 +102,77 @@ func (job *Job) String() string {
}
func (job *Job) Getenv(key string) (value string) {
for _, kv := range job.env {
if strings.Index(kv, "=") == -1 {
continue
}
parts := strings.SplitN(kv, "=", 2)
if parts[0] != key {
continue
}
if len(parts) < 2 {
value = ""
} else {
value = parts[1]
}
}
return
return job.env.Get(key)
}
func (job *Job) GetenvBool(key string) (value bool) {
s := strings.ToLower(strings.Trim(job.Getenv(key), " \t"))
if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
return false
}
return true
return job.env.GetBool(key)
}
func (job *Job) SetenvBool(key string, value bool) {
if value {
job.Setenv(key, "1")
} else {
job.Setenv(key, "0")
}
job.env.SetBool(key, value)
}
func (job *Job) GetenvInt(key string) int64 {
s := strings.Trim(job.Getenv(key), " \t")
val, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return -1
}
return val
func (job *Job) GetenvInt64(key string) int64 {
return job.env.GetInt64(key)
}
func (job *Job) SetenvInt(key string, value int64) {
job.Setenv(key, fmt.Sprintf("%d", value))
func (job *Job) GetenvInt(key string) int {
return job.env.GetInt(key)
}
func (job *Job) SetenvInt64(key string, value int64) {
job.env.SetInt64(key, value)
}
func (job *Job) SetenvInt(key string, value int) {
job.env.SetInt(key, value)
}
// Returns nil if key not found
func (job *Job) GetenvList(key string) []string {
sval := job.Getenv(key)
l := make([]string, 0, 1)
if err := json.Unmarshal([]byte(sval), &l); err != nil {
l = append(l, sval)
}
return l
return job.env.GetList(key)
}
func (job *Job) GetenvJson(key string, iface interface{}) error {
return job.env.GetJson(key, iface)
}
func (job *Job) SetenvJson(key string, value interface{}) error {
sval, err := json.Marshal(value)
if err != nil {
return err
}
job.Setenv(key, string(sval))
return nil
return job.env.SetJson(key, value)
}
func (job *Job) SetenvList(key string, value []string) error {
return job.SetenvJson(key, value)
return job.env.SetJson(key, value)
}
func (job *Job) Setenv(key, value string) {
job.env = append(job.env, key+"="+value)
job.env.Set(key, value)
}
// DecodeEnv decodes `src` as a json dictionary, and adds
// each decoded key-value pair to the environment.
//
// If `text` cannot be decoded as a json dictionary, an error
// If `src` cannot be decoded as a json dictionary, an error
// is returned.
func (job *Job) DecodeEnv(src io.Reader) error {
m := make(map[string]interface{})
if err := json.NewDecoder(src).Decode(&m); err != nil {
return err
}
for k, v := range m {
// FIXME: we fix-convert float values to int, because
// encoding/json decodes integers to float64, but cannot encode them back.
// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
if fval, ok := v.(float64); ok {
job.SetenvInt(k, int64(fval))
} else if sval, ok := v.(string); ok {
job.Setenv(k, sval)
} else if val, err := json.Marshal(v); err == nil {
job.Setenv(k, string(val))
} else {
job.Setenv(k, fmt.Sprintf("%v", v))
}
}
return nil
return job.env.Decode(src)
}
func (job *Job) EncodeEnv(dst io.Writer) error {
m := make(map[string]interface{})
for k, v := range job.Environ() {
var val interface{}
if err := json.Unmarshal([]byte(v), &val); err == nil {
// FIXME: we fix-convert float values to int, because
// encoding/json decodes integers to float64, but cannot encode them back.
// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
if fval, isFloat := val.(float64); isFloat {
val = int(fval)
}
m[k] = val
} else {
m[k] = v
}
}
if err := json.NewEncoder(dst).Encode(&m); err != nil {
return err
}
return nil
return job.env.Encode(dst)
}
func (job *Job) ExportEnv(dst interface{}) (err error) {
defer func() {
if err != nil {
err = fmt.Errorf("ExportEnv %s", err)
}
}()
var buf bytes.Buffer
// step 1: encode/marshal the env to an intermediary json representation
if err := job.EncodeEnv(&buf); err != nil {
return err
}
// step 2: decode/unmarshal the intermediary json into the destination object
if err := json.NewDecoder(&buf).Decode(dst); err != nil {
return err
}
return nil
return job.env.Export(dst)
}
func (job *Job) ImportEnv(src interface{}) (err error) {
defer func() {
if err != nil {
err = fmt.Errorf("ImportEnv: %s", err)
}
}()
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(src); err != nil {
return err
}
if err := job.DecodeEnv(&buf); err != nil {
return err
}
return nil
return job.env.Import(src)
}
func (job *Job) Environ() map[string]string {
m := make(map[string]string)
for _, kv := range job.env {
parts := strings.SplitN(kv, "=", 2)
m[parts[0]] = parts[1]
}
return m
return job.env.Map()
}
func (job *Job) Logf(format string, args ...interface{}) (n int, err error) {
@@ -334,5 +186,8 @@ func (job *Job) Printf(format string, args ...interface{}) (n int, err error) {
func (job *Job) Errorf(format string, args ...interface{}) (n int, err error) {
return fmt.Fprintf(job.Stderr, format, args...)
}
func (job *Job) Error(err error) (int, error) {
return fmt.Fprintf(job.Stderr, "%s", err)
}
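A sketch of the new status flow: a failing handler writes a message to the job's stderr and returns a non-zero ``Status``, and ``Run()`` surfaces the last stderr line as the error (the handler name is illustrative):

package main

import (
	"fmt"

	"github.com/dotcloud/docker/engine"
)

func main() {
	eng, err := engine.New("/tmp/docker-engine-jobs")
	if err != nil {
		panic(err)
	}
	eng.Register("fail", func(job *engine.Job) engine.Status {
		job.Errorf("something went wrong\n")
		return engine.StatusErr
	})
	// Run() builds its error from the job name and the last
	// line written to the job's stderr.
	if err := eng.Job("fail").Run(); err != nil {
		fmt.Println(err) // fail: something went wrong
	}
}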

80
engine/job_test.go Normal file
View File

@@ -0,0 +1,80 @@
package engine
import (
"os"
"testing"
)
func TestJobStatusOK(t *testing.T) {
eng := newTestEngine(t)
defer os.RemoveAll(eng.Root())
eng.Register("return_ok", func(job *Job) Status { return StatusOK })
err := eng.Job("return_ok").Run()
if err != nil {
t.Fatalf("Expected: err=%v\nReceived: err=%v", nil, err)
}
}
func TestJobStatusErr(t *testing.T) {
eng := newTestEngine(t)
defer os.RemoveAll(eng.Root())
eng.Register("return_err", func(job *Job) Status { return StatusErr })
err := eng.Job("return_err").Run()
if err == nil {
t.Fatalf("When a job returns StatusErr, Run() should return an error")
}
}
func TestJobStatusNotFound(t *testing.T) {
eng := newTestEngine(t)
defer os.RemoveAll(eng.Root())
eng.Register("return_not_found", func(job *Job) Status { return StatusNotFound })
err := eng.Job("return_not_found").Run()
if err == nil {
t.Fatalf("When a job returns StatusNotFound, Run() should return an error")
}
}
func TestJobStdoutString(t *testing.T) {
eng := newTestEngine(t)
defer os.RemoveAll(eng.Root())
// FIXME: test multiple combinations of output and status
eng.Register("say_something_in_stdout", func(job *Job) Status {
job.Printf("Hello world\n")
return StatusOK
})
job := eng.Job("say_something_in_stdout")
var output string
if err := job.Stdout.AddString(&output); err != nil {
t.Fatal(err)
}
if err := job.Run(); err != nil {
t.Fatal(err)
}
if expectedOutput := "Hello world"; output != expectedOutput {
t.Fatalf("Stdout last line:\nExpected: %v\nReceived: %v", expectedOutput, output)
}
}
func TestJobStderrString(t *testing.T) {
eng := newTestEngine(t)
defer os.RemoveAll(eng.Root())
// FIXME: test multiple combinations of output and status
eng.Register("say_something_in_stderr", func(job *Job) Status {
job.Errorf("Warning, something might happen\nHere it comes!\nOh no...\nSomething happened\n")
return StatusOK
})
job := eng.Job("say_something_in_stderr")
var output string
if err := job.Stderr.AddString(&output); err != nil {
t.Fatal(err)
}
if err := job.Run(); err != nil {
t.Fatal(err)
}
if expectedOutput := "Something happened"; output != expectedOutput {
t.Fatalf("Stderr last line:\nExpected: %v\nReceived: %v", expectedOutput, output)
}
}

192
engine/streams.go Normal file
View File

@@ -0,0 +1,192 @@
package engine
import (
"bufio"
"container/ring"
"fmt"
"io"
"sync"
)
type Output struct {
sync.Mutex
dests []io.Writer
tasks sync.WaitGroup
}
// NewOutput returns a new Output object with no destinations attached.
// Writing to an empty Output will cause the written data to be discarded.
func NewOutput() *Output {
return &Output{}
}
// Add attaches a new destination to the Output. Any data subsequently written
// to the output will be written to the new destination in addition to all the others.
// This method is thread-safe.
// FIXME: Add cannot fail
func (o *Output) Add(dst io.Writer) error {
o.Mutex.Lock()
defer o.Mutex.Unlock()
o.dests = append(o.dests, dst)
return nil
}
// AddPipe creates an in-memory pipe with io.Pipe(), adds its writing end as a destination,
// and returns its reading end for consumption by the caller.
// This is roughly equivalent to Cmd.StdoutPipe() in the standard os/exec package.
// This method is thread-safe.
func (o *Output) AddPipe() (io.Reader, error) {
r, w := io.Pipe()
o.Add(w)
return r, nil
}
// AddTail starts a new goroutine which will read all subsequent data written to the output,
// line by line, and append the last `n` lines to `dst`.
func (o *Output) AddTail(dst *[]string, n int) error {
src, err := o.AddPipe()
if err != nil {
return err
}
o.tasks.Add(1)
go func() {
defer o.tasks.Done()
Tail(src, n, dst)
}()
return nil
}
// AddString starts a new goroutine which will read all subsequent data written to the output,
// line by line, and store the last line into `dst`.
func (o *Output) AddString(dst *string) error {
src, err := o.AddPipe()
if err != nil {
return err
}
o.tasks.Add(1)
go func() {
defer o.tasks.Done()
lines := make([]string, 0, 1)
Tail(src, 1, &lines)
if len(lines) == 0 {
*dst = ""
} else {
*dst = lines[0]
}
}()
return nil
}
// Write writes the same data to all registered destinations.
// This method is thread-safe.
func (o *Output) Write(p []byte) (n int, err error) {
o.Mutex.Lock()
defer o.Mutex.Unlock()
var firstErr error
for _, dst := range o.dests {
_, err := dst.Write(p)
if err != nil && firstErr == nil {
firstErr = err
}
}
return len(p), firstErr
}
// Close unregisters all destinations and waits for all background
// AddTail and AddString tasks to complete.
// The Close method of each destination is called if it exists.
func (o *Output) Close() error {
o.Mutex.Lock()
defer o.Mutex.Unlock()
var firstErr error
for _, dst := range o.dests {
if closer, ok := dst.(io.WriteCloser); ok {
err := closer.Close()
if err != nil && firstErr == nil {
firstErr = err
}
}
}
o.tasks.Wait()
return firstErr
}
type Input struct {
src io.Reader
sync.Mutex
}
// NewInput returns a new Input object with no source attached.
// Reading to an empty Input will return io.EOF.
func NewInput() *Input {
return &Input{}
}
// Read reads from the input in a thread-safe way.
func (i *Input) Read(p []byte) (n int, err error) {
i.Mutex.Lock()
defer i.Mutex.Unlock()
if i.src == nil {
return 0, io.EOF
}
return i.src.Read(p)
}
// Add attaches a new source to the input.
// Add can only be called once per input. Subsequent calls will
// return an error.
func (i *Input) Add(src io.Reader) error {
i.Mutex.Lock()
defer i.Mutex.Unlock()
if i.src != nil {
return fmt.Errorf("Maximum number of sources reached: 1")
}
i.src = src
return nil
}
// Tail reads from `src` line per line, and returns the last `n` lines as an array.
// A ring buffer is used to only store `n` lines at any time.
func Tail(src io.Reader, n int, dst *[]string) {
scanner := bufio.NewScanner(src)
r := ring.New(n)
for scanner.Scan() {
if n == 0 {
continue
}
r.Value = scanner.Text()
r = r.Next()
}
r.Do(func(v interface{}) {
if v == nil {
return
}
*dst = append(*dst, v.(string))
})
}
// AddEnv starts a new goroutine which will decode all subsequent data
// as a stream of json-encoded objects, and point `dst` to the last
// decoded object.
// The result `dst` can be queried using the type-neutral Env interface.
// It is not safe to query `dst` until the Output is closed.
func (o *Output) AddEnv() (dst *Env, err error) {
src, err := o.AddPipe()
if err != nil {
return nil, err
}
dst = &Env{}
o.tasks.Add(1)
go func() {
defer o.tasks.Done()
decoder := NewDecoder(src)
for {
env, err := decoder.Decode()
if err != nil {
return
}
*dst = *env
}
}()
return dst, nil
}
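A fan-out sketch for ``Output``: a single ``Write`` is mirrored to every attached destination, and ``Close`` waits for the background ``AddString``/``AddTail`` readers to drain:

package main

import (
	"bytes"
	"fmt"

	"github.com/dotcloud/docker/engine"
)

func main() {
	o := engine.NewOutput()

	var raw bytes.Buffer
	o.Add(&raw) // plain destination: receives the raw bytes

	var last string
	o.AddString(&last) // background reader: keeps only the last line

	o.Write([]byte("one\ntwo\nthree"))
	o.Close() // closes destinations and waits for the AddString goroutine

	fmt.Printf("raw: %q\n", raw.String()) // "one\ntwo\nthree"
	fmt.Printf("last: %q\n", last)        // "three"
}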

294
engine/streams_test.go Normal file
View File

@@ -0,0 +1,294 @@
package engine
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"strings"
"testing"
)
func TestOutputAddString(t *testing.T) {
var testInputs = [][2]string{
{
"hello, world!",
"hello, world!",
},
{
"One\nTwo\nThree",
"Three",
},
{
"",
"",
},
{
"A line\nThen another nl-terminated line\n",
"Then another nl-terminated line",
},
{
"A line followed by an empty line\n\n",
"",
},
}
for _, testData := range testInputs {
input := testData[0]
expectedOutput := testData[1]
o := NewOutput()
var output string
if err := o.AddString(&output); err != nil {
t.Error(err)
}
if n, err := o.Write([]byte(input)); err != nil {
t.Error(err)
} else if n != len(input) {
t.Errorf("Expected %d, got %d", len(input), n)
}
o.Close()
if output != expectedOutput {
t.Errorf("Last line was not stored as the return string.\nInput: '%s'\nExpected: '%s'\nGot: '%s'", input, expectedOutput, output)
}
}
}
type sentinelWriteCloser struct {
calledWrite bool
calledClose bool
}
func (w *sentinelWriteCloser) Write(p []byte) (int, error) {
w.calledWrite = true
return len(p), nil
}
func (w *sentinelWriteCloser) Close() error {
w.calledClose = true
return nil
}
func TestOutputAddEnv(t *testing.T) {
input := "{\"foo\": \"bar\", \"answer_to_life_the_universe_and_everything\": 42}"
o := NewOutput()
result, err := o.AddEnv()
if err != nil {
t.Fatal(err)
}
o.Write([]byte(input))
o.Close()
if v := result.Get("foo"); v != "bar" {
t.Errorf("Expected %v, got %v", "bar", v)
}
if v := result.GetInt("answer_to_life_the_universe_and_everything"); v != 42 {
t.Errorf("Expected %v, got %v", 42, v)
}
if v := result.Get("this-value-doesnt-exist"); v != "" {
t.Errorf("Expected %v, got %v", "", v)
}
}
func TestOutputAddClose(t *testing.T) {
o := NewOutput()
var s sentinelWriteCloser
if err := o.Add(&s); err != nil {
t.Fatal(err)
}
if err := o.Close(); err != nil {
t.Fatal(err)
}
// Write data after the output is closed.
// Write should succeed, but no destination should receive it.
if _, err := o.Write([]byte("foo bar")); err != nil {
t.Fatal(err)
}
if !s.calledClose {
t.Fatal("Output.Close() didn't close the destination")
}
}
func TestOutputAddPipe(t *testing.T) {
var testInputs = []string{
"hello, world!",
"One\nTwo\nThree",
"",
"A line\nThen another nl-terminated line\n",
"A line followed by an empty line\n\n",
}
for _, input := range testInputs {
expectedOutput := input
o := NewOutput()
r, err := o.AddPipe()
if err != nil {
t.Fatal(err)
}
go func(o *Output) {
if n, err := o.Write([]byte(input)); err != nil {
t.Error(err)
} else if n != len(input) {
t.Errorf("Expected %d, got %d", len(input), n)
}
if err := o.Close(); err != nil {
t.Error(err)
}
}(o)
output, err := ioutil.ReadAll(r)
if err != nil {
t.Fatal(err)
}
if string(output) != expectedOutput {
t.Errorf("Pipe output does not match the written input.\nExpected: '%s'\nGot: '%s'", expectedOutput, output)
}
}
}
func TestTail(t *testing.T) {
var tests = make(map[string][][]string)
tests["hello, world!"] = [][]string{
{},
{"hello, world!"},
{"hello, world!"},
{"hello, world!"},
}
tests["One\nTwo\nThree"] = [][]string{
{},
{"Three"},
{"Two", "Three"},
{"One", "Two", "Three"},
}
for input, outputs := range tests {
for n, expectedOutput := range outputs {
var output []string
Tail(strings.NewReader(input), n, &output)
if fmt.Sprintf("%v", output) != fmt.Sprintf("%v", expectedOutput) {
t.Errorf("Tail n=%d returned wrong result.\nExpected: '%s'\nGot: '%s'", n, expectedOutput, output)
}
}
}
}
func TestOutputAddTail(t *testing.T) {
var tests = make(map[string][][]string)
tests["hello, world!"] = [][]string{
{},
{"hello, world!"},
{"hello, world!"},
{"hello, world!"},
}
tests["One\nTwo\nThree"] = [][]string{
{},
{"Three"},
{"Two", "Three"},
{"One", "Two", "Three"},
}
for input, outputs := range tests {
for n, expectedOutput := range outputs {
o := NewOutput()
var output []string
if err := o.AddTail(&output, n); err != nil {
t.Error(err)
}
if n, err := o.Write([]byte(input)); err != nil {
t.Error(err)
} else if n != len(input) {
t.Errorf("Expected %d, got %d", len(input), n)
}
o.Close()
if fmt.Sprintf("%v", output) != fmt.Sprintf("%v", expectedOutput) {
t.Errorf("Tail(%d) returned wrong result.\nExpected: %v\nGot: %v", n, expectedOutput, output)
}
}
}
}
func lastLine(txt string) string {
scanner := bufio.NewScanner(strings.NewReader(txt))
var lastLine string
for scanner.Scan() {
lastLine = scanner.Text()
}
return lastLine
}
func TestOutputAdd(t *testing.T) {
o := NewOutput()
b := &bytes.Buffer{}
o.Add(b)
input := "hello, world!"
if n, err := o.Write([]byte(input)); err != nil {
t.Fatal(err)
} else if n != len(input) {
t.Fatalf("Expected %d, got %d", len(input), n)
}
if output := b.String(); output != input {
t.Fatalf("Received wrong data from Add.\nExpected: '%s'\nGot: '%s'", input, output)
}
}
func TestOutputWriteError(t *testing.T) {
o := NewOutput()
buf := &bytes.Buffer{}
o.Add(buf)
r, w := io.Pipe()
input := "Hello there"
expectedErr := fmt.Errorf("This is an error")
r.CloseWithError(expectedErr)
o.Add(w)
n, err := o.Write([]byte(input))
if err != expectedErr {
t.Fatalf("Output.Write() should return the first error encountered, if any")
}
if buf.String() != input {
t.Fatalf("Output.Write() should attempt write on all destinations, even after encountering an error")
}
if n != len(input) {
t.Fatalf("Output.Write() should return the size of the input if it successfully writes to at least one destination")
}
}
func TestInputAddEmpty(t *testing.T) {
i := NewInput()
var b bytes.Buffer
if err := i.Add(&b); err != nil {
t.Fatal(err)
}
data, err := ioutil.ReadAll(i)
if err != nil {
t.Fatal(err)
}
if len(data) > 0 {
t.Fatalf("Read from empty input should yield no data")
}
}
func TestInputAddTwo(t *testing.T) {
i := NewInput()
var b1 bytes.Buffer
// First add should succeed
if err := i.Add(&b1); err != nil {
t.Fatal(err)
}
var b2 bytes.Buffer
// Second add should fail
if err := i.Add(&b2); err == nil {
t.Fatalf("Adding a second source should return an error")
}
}
func TestInputAddNotEmpty(t *testing.T) {
i := NewInput()
b := bytes.NewBufferString("hello world\nabc")
expectedResult := b.String()
i.Add(b)
result, err := ioutil.ReadAll(i)
if err != nil {
t.Fatal(err)
}
if string(result) != expectedResult {
t.Fatalf("Expected: %v\nReceived: %v", expectedResult, result)
}
}

View File

@@ -1,42 +0,0 @@
package engine
import (
"fmt"
"github.com/dotcloud/docker/utils"
"io/ioutil"
"runtime"
"strings"
"testing"
)
var globalTestID string
func init() {
Register("dummy", func(job *Job) string { return "" })
}
func newTestEngine(t *testing.T) *Engine {
// Use the caller function name as a prefix.
// This helps trace temp directories back to their test.
pc, _, _, _ := runtime.Caller(1)
callerLongName := runtime.FuncForPC(pc).Name()
parts := strings.Split(callerLongName, ".")
callerShortName := parts[len(parts)-1]
if globalTestID == "" {
globalTestID = utils.RandomString()[:4]
}
prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, callerShortName)
root, err := ioutil.TempDir("", prefix)
if err != nil {
t.Fatal(err)
}
eng, err := New(root)
if err != nil {
t.Fatal(err)
}
return eng
}
func mkJob(t *testing.T, name string, args ...string) *Job {
return newTestEngine(t).Job(name, args...)
}

View File

@@ -52,7 +52,9 @@ func (graph *Graph) restore() error {
}
for _, v := range dir {
id := v.Name()
graph.idIndex.Add(id)
if graph.driver.Exists(id) {
graph.idIndex.Add(id)
}
}
return nil
}
@@ -92,11 +94,25 @@ func (graph *Graph) Get(name string) (*Image, error) {
return nil, fmt.Errorf("Image stored at '%s' has wrong id '%s'", id, img.ID)
}
img.graph = graph
if img.Size == 0 {
size, err := utils.TreeSize(rootfs)
if err != nil {
return nil, fmt.Errorf("Error computing size of rootfs %s: %s", img.ID, err)
if img.Size < 0 {
var size int64
if img.Parent == "" {
if size, err = utils.TreeSize(rootfs); err != nil {
return nil, err
}
} else {
parentFs, err := graph.driver.Get(img.Parent)
if err != nil {
return nil, err
}
changes, err := archive.ChangesDirs(rootfs, parentFs)
if err != nil {
return nil, err
}
size = archive.ChangesSize(rootfs, changes)
}
img.Size = size
if err := img.SaveSize(graph.imageRoot(id)); err != nil {
return nil, err
@@ -110,7 +126,7 @@ func (graph *Graph) Create(layerData archive.Archive, container *Container, comm
img := &Image{
ID: GenerateID(),
Comment: comment,
Created: time.Now(),
Created: time.Now().UTC(),
DockerVersion: VERSION,
Author: author,
Config: config,
@@ -129,7 +145,15 @@ func (graph *Graph) Create(layerData archive.Archive, container *Container, comm
// Register imports a pre-existing image into the graph.
// FIXME: pass img as first argument
func (graph *Graph) Register(jsonData []byte, layerData archive.Archive, img *Image) error {
func (graph *Graph) Register(jsonData []byte, layerData archive.Archive, img *Image) (err error) {
defer func() {
// If any error occurs, remove the new dir from the driver.
// Don't check for errors since the dir might not have been created.
// FIXME: this leaves a possible race condition.
if err != nil {
graph.driver.Remove(img.ID)
}
}()
if err := ValidateID(img.ID); err != nil {
return err
}
@@ -137,6 +161,20 @@ func (graph *Graph) Register(jsonData []byte, layerData archive.Archive, img *Im
if graph.Exists(img.ID) {
return fmt.Errorf("Image %s already exists", img.ID)
}
// Ensure that the image root does not exist on the filesystem
// when it is not registered in the graph.
// This is common when you switch from one graph driver to another
if err := os.RemoveAll(graph.imageRoot(img.ID)); err != nil && !os.IsNotExist(err) {
return err
}
// If the driver has this ID but the graph doesn't, remove it from the driver to start fresh.
// (the graph is the source of truth).
// Ignore errors, since we don't know if the driver correctly returns ErrNotExist.
// (FIXME: make that mandatory for drivers).
graph.driver.Remove(img.ID)
tmp, err := graph.Mktemp("")
defer os.RemoveAll(tmp)
if err != nil {
@@ -177,11 +215,11 @@ func (graph *Graph) TempLayerArchive(id string, compression archive.Compression,
if err != nil {
return nil, err
}
a, err := image.TarLayer(compression)
a, err := image.TarLayer()
if err != nil {
return nil, err
}
return archive.NewTempArchive(utils.ProgressReader(ioutil.NopCloser(a), 0, output, sf.FormatProgress("", "Buffering to disk", "%v/%v (%v)"), sf, true), tmp)
return archive.NewTempArchive(utils.ProgressReader(ioutil.NopCloser(a), 0, output, sf, false, utils.TruncateID(id), "Buffering to disk"), tmp)
}
// Mktemp creates a temporary sub-directory inside the graph's filesystem.
@@ -353,3 +391,7 @@ func (graph *Graph) Heads() (map[string]*Image, error) {
func (graph *Graph) imageRoot(id string) string {
return path.Join(graph.Root, id)
}
func (graph *Graph) Driver() graphdriver.Driver {
return graph.driver
}

View File

@@ -1,297 +0,0 @@
package docker
import (
"archive/tar"
"bytes"
"errors"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/graphdriver"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
"os"
"testing"
"time"
)
func TestInit(t *testing.T) {
graph := tempGraph(t)
defer os.RemoveAll(graph.Root)
// Root should exist
if _, err := os.Stat(graph.Root); err != nil {
t.Fatal(err)
}
// Map() should be empty
if l, err := graph.Map(); err != nil {
t.Fatal(err)
} else if len(l) != 0 {
t.Fatalf("len(Map()) should return %d, not %d", 0, len(l))
}
}
// Test that Register can be interrupted cleanly without side effects
func TestInterruptedRegister(t *testing.T) {
graph := tempGraph(t)
defer os.RemoveAll(graph.Root)
badArchive, w := io.Pipe() // Use a pipe reader as a fake archive which never yields data
image := &Image{
ID: GenerateID(),
Comment: "testing",
Created: time.Now(),
}
go graph.Register(nil, badArchive, image)
time.Sleep(200 * time.Millisecond)
w.CloseWithError(errors.New("But I'm not a tarball!")) // (Nobody's perfect, darling)
if _, err := graph.Get(image.ID); err == nil {
t.Fatal("Image should not exist after Register is interrupted")
}
// Registering the same image again should succeed if the first register was interrupted
goodArchive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
if err := graph.Register(nil, goodArchive, image); err != nil {
t.Fatal(err)
}
}
// FIXME: Do more extensive tests (ex: create multiple, delete, recreate;
// create multiple, check the amount of images and paths, etc..)
func TestGraphCreate(t *testing.T) {
graph := tempGraph(t)
defer os.RemoveAll(graph.Root)
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
image, err := graph.Create(archive, nil, "Testing", "", nil)
if err != nil {
t.Fatal(err)
}
if err := ValidateID(image.ID); err != nil {
t.Fatal(err)
}
if image.Comment != "Testing" {
t.Fatalf("Wrong comment: should be '%s', not '%s'", "Testing", image.Comment)
}
if image.DockerVersion != VERSION {
t.Fatalf("Wrong docker_version: should be '%s', not '%s'", VERSION, image.DockerVersion)
}
images, err := graph.Map()
if err != nil {
t.Fatal(err)
} else if l := len(images); l != 1 {
t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l)
}
if images[image.ID] == nil {
t.Fatalf("Could not find image with id %s", image.ID)
}
}
func TestRegister(t *testing.T) {
graph := tempGraph(t)
defer os.RemoveAll(graph.Root)
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
image := &Image{
ID: GenerateID(),
Comment: "testing",
Created: time.Now(),
}
err = graph.Register(nil, archive, image)
if err != nil {
t.Fatal(err)
}
if images, err := graph.Map(); err != nil {
t.Fatal(err)
} else if l := len(images); l != 1 {
t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l)
}
if resultImg, err := graph.Get(image.ID); err != nil {
t.Fatal(err)
} else {
if resultImg.ID != image.ID {
t.Fatalf("Wrong image ID. Should be '%s', not '%s'", image.ID, resultImg.ID)
}
if resultImg.Comment != image.Comment {
t.Fatalf("Wrong image comment. Should be '%s', not '%s'", image.Comment, resultImg.Comment)
}
}
}
// Test that an image can be deleted by its shorthand prefix
func TestDeletePrefix(t *testing.T) {
graph := tempGraph(t)
defer os.RemoveAll(graph.Root)
img := createTestImage(graph, t)
if err := graph.Delete(utils.TruncateID(img.ID)); err != nil {
t.Fatal(err)
}
assertNImages(graph, t, 0)
}
func createTestImage(graph *Graph, t *testing.T) *Image {
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
img, err := graph.Create(archive, nil, "Test image", "", nil)
if err != nil {
t.Fatal(err)
}
return img
}
func TestDelete(t *testing.T) {
graph := tempGraph(t)
defer os.RemoveAll(graph.Root)
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
assertNImages(graph, t, 0)
img, err := graph.Create(archive, nil, "Bla bla", "", nil)
if err != nil {
t.Fatal(err)
}
assertNImages(graph, t, 1)
if err := graph.Delete(img.ID); err != nil {
t.Fatal(err)
}
assertNImages(graph, t, 0)
archive, err = fakeTar()
if err != nil {
t.Fatal(err)
}
// Test 2 create (same name) / 1 delete
img1, err := graph.Create(archive, nil, "Testing", "", nil)
if err != nil {
t.Fatal(err)
}
archive, err = fakeTar()
if err != nil {
t.Fatal(err)
}
if _, err = graph.Create(archive, nil, "Testing", "", nil); err != nil {
t.Fatal(err)
}
assertNImages(graph, t, 2)
if err := graph.Delete(img1.ID); err != nil {
t.Fatal(err)
}
assertNImages(graph, t, 1)
// Test delete wrong name
if err := graph.Delete("Not_foo"); err == nil {
t.Fatalf("Deleting wrong ID should return an error")
}
assertNImages(graph, t, 1)
archive, err = fakeTar()
if err != nil {
t.Fatal(err)
}
// Test delete twice (pull -> rm -> pull -> rm)
if err := graph.Register(nil, archive, img1); err != nil {
t.Fatal(err)
}
if err := graph.Delete(img1.ID); err != nil {
t.Fatal(err)
}
assertNImages(graph, t, 1)
}
func TestByParent(t *testing.T) {
archive1, _ := fakeTar()
archive2, _ := fakeTar()
archive3, _ := fakeTar()
graph := tempGraph(t)
defer os.RemoveAll(graph.Root)
parentImage := &Image{
ID: GenerateID(),
Comment: "parent",
Created: time.Now(),
Parent: "",
}
childImage1 := &Image{
ID: GenerateID(),
Comment: "child1",
Created: time.Now(),
Parent: parentImage.ID,
}
childImage2 := &Image{
ID: GenerateID(),
Comment: "child2",
Created: time.Now(),
Parent: parentImage.ID,
}
_ = graph.Register(nil, archive1, parentImage)
_ = graph.Register(nil, archive2, childImage1)
_ = graph.Register(nil, archive3, childImage2)
byParent, err := graph.ByParent()
if err != nil {
t.Fatal(err)
}
numChildren := len(byParent[parentImage.ID])
if numChildren != 2 {
t.Fatalf("Expected 2 children, found %d", numChildren)
}
}
func assertNImages(graph *Graph, t *testing.T, n int) {
if images, err := graph.Map(); err != nil {
t.Fatal(err)
} else if actualN := len(images); actualN != n {
t.Fatalf("Expected %d images, found %d", n, actualN)
}
}
/*
* HELPER FUNCTIONS
*/
func tempGraph(t *testing.T) *Graph {
tmp, err := ioutil.TempDir("", "docker-graph-")
if err != nil {
t.Fatal(err)
}
backend, err := graphdriver.New(tmp)
if err != nil {
t.Fatal(err)
}
graph, err := NewGraph(tmp, backend)
if err != nil {
t.Fatal(err)
}
return graph
}
func testArchive(t *testing.T) archive.Archive {
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
return archive
}
func fakeTar() (io.Reader, error) {
content := []byte("Hello world!\n")
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} {
hdr := new(tar.Header)
hdr.Size = int64(len(content))
hdr.Name = name
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
tw.Write([]byte(content))
}
tw.Close()
return buf, nil
}

View File

@@ -26,11 +26,11 @@ import (
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/graphdriver"
"github.com/dotcloud/docker/utils"
"log"
"os"
"os/exec"
"path"
"strings"
"syscall"
)
func init() {
@@ -103,8 +103,12 @@ func (Driver) String() string {
return "aufs"
}
func (Driver) Status() [][2]string {
return nil
func (a Driver) Status() [][2]string {
ids, _ := loadIds(path.Join(a.rootPath(), "layers"))
return [][2]string{
{"Root Dir", a.rootPath()},
{"Dirs", fmt.Sprintf("%d", len(ids))},
}
}
// Exists returns true if the given id is registered with
@@ -309,24 +313,44 @@ func (a *Driver) Cleanup() error {
return nil
}
func (a *Driver) aufsMount(ro []string, rw, target string) error {
rwBranch := fmt.Sprintf("%v=rw", rw)
roBranches := ""
for _, layer := range ro {
roBranches += fmt.Sprintf("%v=ro+wh:", layer)
}
branches := fmt.Sprintf("br:%v:%v,xino=/dev/shm/aufs.xino", rwBranch, roBranches)
func (a *Driver) aufsMount(ro []string, rw, target string) (err error) {
defer func() {
if err != nil {
Unmount(target)
}
}()
//if error, try to load aufs kernel module
if err := mount("none", target, "aufs", 0, branches); err != nil {
log.Printf("Kernel does not support AUFS, trying to load the AUFS module with modprobe...")
if err := exec.Command("modprobe", "aufs").Run(); err != nil {
return fmt.Errorf("Unable to load the AUFS module")
if err = a.tryMount(ro, rw, target); err != nil {
if err = a.mountRw(rw, target); err != nil {
return
}
log.Printf("...module loaded.")
if err := mount("none", target, "aufs", 0, branches); err != nil {
return fmt.Errorf("Unable to mount using aufs %s", err)
for _, layer := range ro {
branch := fmt.Sprintf("append:%s=ro+wh", layer)
if err = mount("none", target, "aufs", syscall.MS_REMOUNT, branch); err != nil {
return
}
}
}
return nil
return
}
// Try to mount using the aufs fast path, if this fails then
// append ro layers.
func (a *Driver) tryMount(ro []string, rw, target string) (err error) {
var (
rwBranch = fmt.Sprintf("%s=rw", rw)
roBranches = fmt.Sprintf("%s=ro+wh:", strings.Join(ro, "=ro+wh:"))
)
return mount("none", target, "aufs", 0, fmt.Sprintf("br:%v:%v,xino=/dev/shm/aufs.xino", rwBranch, roBranches))
}
func (a *Driver) mountRw(rw, target string) error {
return mount("none", target, "aufs", 0, fmt.Sprintf("br:%s,xino=/dev/shm/aufs.xino", rw))
}
func rollbackMount(target string, err error) {
if err != nil {
Unmount(target)
}
}

View File

@@ -1,7 +1,11 @@
package aufs
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"github.com/dotcloud/docker/archive"
"io/ioutil"
"os"
"path"
"testing"
@@ -446,7 +450,9 @@ func TestDiffSize(t *testing.T) {
if err != nil {
t.Fatal(err)
}
f.Truncate(size)
if err := f.Truncate(size); err != nil {
t.Fatal(err)
}
s, err := f.Stat()
if err != nil {
t.Fatal(err)
@@ -465,6 +471,108 @@ func TestDiffSize(t *testing.T) {
}
}
func TestChildDiffSize(t *testing.T) {
d := newDriver(t)
defer os.RemoveAll(tmp)
defer d.Cleanup()
if err := d.Create("1", ""); err != nil {
t.Fatal(err)
}
diffPath, err := d.Get("1")
if err != nil {
t.Fatal(err)
}
// Add a file to the diff path with a fixed size
size := int64(1024)
f, err := os.Create(path.Join(diffPath, "test_file"))
if err != nil {
t.Fatal(err)
}
if err := f.Truncate(size); err != nil {
t.Fatal(err)
}
s, err := f.Stat()
if err != nil {
t.Fatal(err)
}
size = s.Size()
if err := f.Close(); err != nil {
t.Fatal(err)
}
diffSize, err := d.DiffSize("1")
if err != nil {
t.Fatal(err)
}
if diffSize != size {
t.Fatalf("Expected size to be %d got %d", size, diffSize)
}
if err := d.Create("2", "1"); err != nil {
t.Fatal(err)
}
diffSize, err = d.DiffSize("2")
if err != nil {
t.Fatal(err)
}
// The diff size for the child should be zero
if diffSize != 0 {
t.Fatalf("Expected size to be %d got %d", 0, diffSize)
}
}
func TestExists(t *testing.T) {
d := newDriver(t)
defer os.RemoveAll(tmp)
defer d.Cleanup()
if err := d.Create("1", ""); err != nil {
t.Fatal(err)
}
if d.Exists("none") {
t.Fatal("id name should not exist in the driver")
}
if !d.Exists("1") {
t.Fatal("id 1 should exist in the driver")
}
}
func TestStatus(t *testing.T) {
d := newDriver(t)
defer os.RemoveAll(tmp)
defer d.Cleanup()
if err := d.Create("1", ""); err != nil {
t.Fatal(err)
}
status := d.Status()
if status == nil || len(status) == 0 {
t.Fatal("Status should not be nil or empty")
}
rootDir := status[0]
dirs := status[1]
if rootDir[0] != "Root Dir" {
t.Fatalf("Expected Root Dir got %s", rootDir[0])
}
if rootDir[1] != d.rootPath() {
t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1])
}
if dirs[0] != "Dirs" {
t.Fatalf("Expected Dirs got %s", dirs[0])
}
if dirs[1] != "1" {
t.Fatalf("Expected 1 got %s", dirs[1])
}
}
func TestApplyDiff(t *testing.T) {
d := newDriver(t)
defer os.RemoveAll(tmp)
@@ -486,7 +594,9 @@ func TestApplyDiff(t *testing.T) {
if err != nil {
t.Fatal(err)
}
f.Truncate(size)
if err := f.Truncate(size); err != nil {
t.Fatal(err)
}
f.Close()
diff, err := d.Diff("1")
@@ -515,3 +625,70 @@ func TestApplyDiff(t *testing.T) {
t.Fatal(err)
}
}
func hash(c string) string {
h := sha256.New()
fmt.Fprint(h, c)
return hex.EncodeToString(h.Sum(nil))
}
func TestMountMoreThan42Layers(t *testing.T) {
d := newDriver(t)
defer os.RemoveAll(tmp)
defer d.Cleanup()
var last string
var expected int
for i := 1; i < 127; i++ {
expected++
var (
parent = fmt.Sprintf("%d", i-1)
current = fmt.Sprintf("%d", i)
)
if parent == "0" {
parent = ""
} else {
parent = hash(parent)
}
current = hash(current)
if err := d.Create(current, parent); err != nil {
t.Logf("Current layer %d", i)
t.Fatal(err)
}
point, err := d.Get(current)
if err != nil {
t.Logf("Current layer %d", i)
t.Fatal(err)
}
f, err := os.Create(path.Join(point, current))
if err != nil {
t.Logf("Current layer %d", i)
t.Fatal(err)
}
f.Close()
if i%10 == 0 {
if err := os.Remove(path.Join(point, parent)); err != nil {
t.Logf("Current layer %d", i)
t.Fatal(err)
}
expected--
}
last = current
}
// Perform the actual mount for the top most image
point, err := d.Get(last)
if err != nil {
t.Fatal(err)
}
files, err := ioutil.ReadDir(point)
if err != nil {
t.Fatal(err)
}
if len(files) != expected {
t.Fatalf("Expected %d got %d", expected, len(files))
}
}

View File

@@ -38,6 +38,9 @@ func pathExists(pth string) bool {
// symlink.
func (a *Driver) Migrate(pth string, setupInit func(p string) error) error {
if pathExists(path.Join(pth, "graph")) {
if err := a.migrateRepositories(pth); err != nil {
return err
}
if err := a.migrateImages(path.Join(pth, "graph")); err != nil {
return err
}
@@ -46,6 +49,14 @@ func (a *Driver) Migrate(pth string, setupInit func(p string) error) error {
return nil
}
func (a *Driver) migrateRepositories(pth string) error {
name := path.Join(pth, "repositories")
if err := os.Rename(name, name+"-aufs"); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) error {
fis, err := ioutil.ReadDir(pth)
if err != nil {

View File

@@ -2,6 +2,6 @@ package aufs
import "syscall"
func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
func mount(source string, target string, fstype string, flags uintptr, data string) error {
return syscall.Mount(source, target, fstype, flags, data)
}

View File

@@ -0,0 +1,126 @@
// +build linux
package devmapper
import (
"fmt"
"github.com/dotcloud/docker/utils"
)
func stringToLoopName(src string) [LoNameSize]uint8 {
var dst [LoNameSize]uint8
copy(dst[:], src[:])
return dst
}
func getNextFreeLoopbackIndex() (int, error) {
f, err := osOpenFile("/dev/loop-control", osORdOnly, 0644)
if err != nil {
return 0, err
}
defer f.Close()
index, err := ioctlLoopCtlGetFree(f.Fd())
if index < 0 {
index = 0
}
return index, err
}
func openNextAvailableLoopback(index int, sparseFile *osFile) (loopFile *osFile, err error) {
// Start looking for a free /dev/loop
for {
target := fmt.Sprintf("/dev/loop%d", index)
index++
fi, err := osStat(target)
if err != nil {
if osIsNotExist(err) {
utils.Errorf("There are no more loopback devices available.")
}
return nil, ErrAttachLoopbackDevice
}
if fi.Mode()&osModeDevice != osModeDevice {
utils.Errorf("Loopback device %s is not a block device.", target)
continue
}
// OpenFile adds O_CLOEXEC
loopFile, err = osOpenFile(target, osORdWr, 0644)
if err != nil {
utils.Errorf("Error opening loopback device: %s", err)
return nil, ErrAttachLoopbackDevice
}
// Try to attach to the loop file
if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil {
loopFile.Close()
// If the error is EBUSY, then try the next loopback
if err != sysEBusy {
utils.Errorf("Cannot set up loopback device %s: %s", target, err)
return nil, ErrAttachLoopbackDevice
}
// Otherwise, we keep going with the loop
continue
}
// In case of success, we finished. Break the loop.
break
}
// This can't happen, but let's be sure
if loopFile == nil {
utils.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
return nil, ErrAttachLoopbackDevice
}
return loopFile, nil
}
// attachLoopDevice attaches the given sparse file to the next
// available loopback device. It returns an opened *osFile.
func attachLoopDevice(sparseName string) (loop *osFile, err error) {
// Try to retrieve the next available loopback device via syscall.
// If it fails, we discard the error and start looking for a
// loopback from index 0.
startIndex, err := getNextFreeLoopbackIndex()
if err != nil {
utils.Debugf("Error retrieving the next available loopback: %s", err)
}
// OpenFile adds O_CLOEXEC
sparseFile, err := osOpenFile(sparseName, osORdWr, 0644)
if err != nil {
utils.Errorf("Error openning sparse file %s: %s", sparseName, err)
return nil, ErrAttachLoopbackDevice
}
defer sparseFile.Close()
loopFile, err := openNextAvailableLoopback(startIndex, sparseFile)
if err != nil {
return nil, err
}
// Set the status of the loopback device
loopInfo := &LoopInfo64{
loFileName: stringToLoopName(loopFile.Name()),
loOffset: 0,
loFlags: LoFlagsAutoClear,
}
if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
utils.Errorf("Cannot set up loopback device info: %s", err)
// If the call failed, then free the loopback device
if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
utils.Errorf("Error while cleaning up the loopback device")
}
loopFile.Close()
return nil, ErrAttachLoopbackDevice
}
return loopFile, nil
}
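A hedged usage sketch (not part of the diff) showing how a caller might attach a backing file and query the resulting device, using only helpers defined in this package:
func attachAndReport(backingFile string) error {
    loopFile, err := attachLoopDevice(backingFile) // picks a free /dev/loopN, sets LO_FLAGS_AUTOCLEAR
    if err != nil {
        return err
    }
    defer loopFile.Close()
    size, err := ioctlBlkGetSize64(loopFile.Fd()) // BLKGETSIZE64 on the loop device
    if err != nil {
        return err
    }
    utils.Debugf("attached %s to %s (%d bytes)", backingFile, loopFile.Name(), size)
    return nil
}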


@@ -1,18 +1,18 @@
// +build linux
package devmapper
import (
"encoding/json"
"errors"
"fmt"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"strconv"
"sync"
"syscall"
"time"
)
@@ -105,7 +105,7 @@ func (devices *DeviceSet) hasImage(name string) bool {
dirname := devices.loopbackDir()
filename := path.Join(dirname, name)
_, err := os.Stat(filename)
_, err := osStat(filename)
return err == nil
}
@@ -117,16 +117,16 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) {
dirname := devices.loopbackDir()
filename := path.Join(dirname, name)
if err := os.MkdirAll(dirname, 0700); err != nil && !os.IsExist(err) {
if err := osMkdirAll(dirname, 0700); err != nil && !osIsExist(err) {
return "", err
}
if _, err := os.Stat(filename); err != nil {
if !os.IsNotExist(err) {
if _, err := osStat(filename); err != nil {
if !osIsNotExist(err) {
return "", err
}
utils.Debugf("Creating loopback file %s for device-manage use", filename)
file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
file, err := osOpenFile(filename, osORdWr|osOCreate, 0600)
if err != nil {
return "", err
}
@@ -174,8 +174,8 @@ func (devices *DeviceSet) saveMetadata() error {
if err := tmpFile.Close(); err != nil {
return fmt.Errorf("Error closing metadata file %s: %s", tmpFile.Name(), err)
}
if err := os.Rename(tmpFile.Name(), devices.jsonFile()); err != nil {
return fmt.Errorf("Error committing metadata file", err)
if err := osRename(tmpFile.Name(), devices.jsonFile()); err != nil {
return fmt.Errorf("Error committing metadata file %s: %s", tmpFile.Name(), err)
}
if devices.NewTransactionId != devices.TransactionId {
@@ -225,9 +225,9 @@ func (devices *DeviceSet) activateDeviceIfNeeded(hash string) error {
func (devices *DeviceSet) createFilesystem(info *DevInfo) error {
devname := info.DevName()
err := exec.Command("mkfs.ext4", "-E", "discard,lazy_itable_init=0,lazy_journal_init=0", devname).Run()
err := execRun("mkfs.ext4", "-E", "discard,lazy_itable_init=0,lazy_journal_init=0", devname)
if err != nil {
err = exec.Command("mkfs.ext4", "-E", "discard,lazy_itable_init=0", devname).Run()
err = execRun("mkfs.ext4", "-E", "discard,lazy_itable_init=0", devname)
}
if err != nil {
utils.Debugf("\n--->Err: %s\n", err)
@@ -252,7 +252,7 @@ func (devices *DeviceSet) loadMetaData() error {
devices.NewTransactionId = devices.TransactionId
jsonData, err := ioutil.ReadFile(devices.jsonFile())
if err != nil && !os.IsNotExist(err) {
if err != nil && !osIsNotExist(err) {
utils.Debugf("\n--->Err: %s\n", err)
return err
}
@@ -337,14 +337,13 @@ func (devices *DeviceSet) setupBaseImage() error {
}
func setCloseOnExec(name string) {
fileInfos, _ := ioutil.ReadDir("/proc/self/fd")
if fileInfos != nil {
if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil {
for _, i := range fileInfos {
link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name()))
link, _ := osReadlink(filepath.Join("/proc/self/fd", i.Name()))
if link == name {
fd, err := strconv.Atoi(i.Name())
if err == nil {
syscall.CloseOnExec(fd)
sysCloseOnExec(fd)
}
}
}
@@ -372,7 +371,7 @@ func (devices *DeviceSet) ResizePool(size int64) error {
datafilename := path.Join(dirname, "data")
metadatafilename := path.Join(dirname, "metadata")
datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0)
datafile, err := osOpenFile(datafilename, osORdWr, 0)
if datafile == nil {
return err
}
@@ -393,7 +392,7 @@ func (devices *DeviceSet) ResizePool(size int64) error {
}
defer dataloopback.Close()
metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0)
metadatafile, err := osOpenFile(metadatafilename, osORdWr, 0)
if metadatafile == nil {
return err
}
@@ -443,11 +442,11 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
hasMetadata := devices.hasImage("metadata")
if !doInit && !hasData {
return fmt.Errorf("Looback data file not found %s")
return errors.New("Loopback data file not found")
}
if !doInit && !hasMetadata {
return fmt.Errorf("Looback metadata file not found %s")
return errors.New("Loopback metadata file not found")
}
createdLoopback := !hasData || !hasMetadata
@@ -464,11 +463,11 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
// Set the device prefix from the device id and inode of the docker root dir
st, err := os.Stat(devices.root)
st, err := osStat(devices.root)
if err != nil {
return fmt.Errorf("Error looking up dir %s: %s", devices.root, err)
}
sysSt := st.Sys().(*syscall.Stat_t)
sysSt := toSysStatT(st.Sys())
// "reg-" stands for "regular file".
// In the future we might use "dev-" for "device file", etc.
// docker-maj,min[-inode] stands for:
@@ -495,14 +494,14 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
if info.Exists == 0 {
utils.Debugf("Pool doesn't exist. Creating it.")
dataFile, err := AttachLoopDevice(data)
dataFile, err := attachLoopDevice(data)
if err != nil {
utils.Debugf("\n--->Err: %s\n", err)
return err
}
defer dataFile.Close()
metadataFile, err := AttachLoopDevice(metadata)
metadataFile, err := attachLoopDevice(metadata)
if err != nil {
utils.Debugf("\n--->Err: %s\n", err)
return err
@@ -641,7 +640,7 @@ func (devices *DeviceSet) deactivateDevice(hash string) error {
// or b) the 1 second timeout expires.
func (devices *DeviceSet) waitRemove(hash string) error {
utils.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, hash)
defer utils.Debugf("[deviceset %s] waitRemove END", devices.devicePrefix, hash)
defer utils.Debugf("[deviceset %s] waitRemove(%) END", devices.devicePrefix, hash)
devname, err := devices.byHash(hash)
if err != nil {
return err
@@ -654,10 +653,13 @@ func (devices *DeviceSet) waitRemove(hash string) error {
// The error might actually be something else, but we can't differentiate.
return nil
}
utils.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists)
if i%100 == 0 {
utils.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists)
}
if devinfo.Exists == 0 {
break
}
time.Sleep(1 * time.Millisecond)
}
if i == 1000 {
@@ -680,7 +682,9 @@ func (devices *DeviceSet) waitClose(hash string) error {
if err != nil {
return err
}
utils.Debugf("Waiting for unmount of %s: opencount=%d", devname, devinfo.OpenCount)
if i%100 == 0 {
utils.Debugf("Waiting for unmount of %s: opencount=%d", devname, devinfo.OpenCount)
}
if devinfo.OpenCount == 0 {
break
}
@@ -708,15 +712,16 @@ func (devices *DeviceSet) byHash(hash string) (devname string, err error) {
}
func (devices *DeviceSet) Shutdown() error {
utils.Debugf("[deviceset %s] shutdown()", devices.devicePrefix)
defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix)
devices.Lock()
utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
defer devices.Unlock()
utils.Debugf("[deviceset %s] shutdown()", devices.devicePrefix)
utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix)
for path, count := range devices.activeMounts {
for i := count; i > 0; i-- {
if err := syscall.Unmount(path, 0); err != nil {
if err := sysUnmount(path, 0); err != nil {
utils.Debugf("Shutdown unmounting %s, error: %s\n", path, err)
}
}
@@ -752,15 +757,15 @@ func (devices *DeviceSet) MountDevice(hash, path string, readOnly bool) error {
info := devices.Devices[hash]
var flags uintptr = syscall.MS_MGC_VAL
var flags uintptr = sysMsMgcVal
if readOnly {
flags = flags | syscall.MS_RDONLY
flags = flags | sysMsRdOnly
}
err := syscall.Mount(info.DevName(), path, "ext4", flags, "discard")
if err != nil && err == syscall.EINVAL {
err = syscall.Mount(info.DevName(), path, "ext4", flags, "")
err := sysMount(info.DevName(), path, "ext4", flags, "discard")
if err != nil && err == sysEInval {
err = sysMount(info.DevName(), path, "ext4", flags, "")
}
if err != nil {
return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err)
@@ -779,7 +784,7 @@ func (devices *DeviceSet) UnmountDevice(hash, path string, deactivate bool) erro
defer devices.Unlock()
utils.Debugf("[devmapper] Unmount(%s)", path)
if err := syscall.Unmount(path, 0); err != nil {
if err := sysUnmount(path, 0); err != nil {
utils.Debugf("\n--->Err: %s\n", err)
return err
}


@@ -1,12 +1,12 @@
// +build linux
package devmapper
import (
"errors"
"fmt"
"github.com/dotcloud/docker/utils"
"os"
"runtime"
"syscall"
)
type DevmapperLogger interface {
@@ -49,7 +49,6 @@ var (
ErrTaskAddTarget = errors.New("dm_task_add_target failed")
ErrTaskSetSector = errors.New("dm_task_set_sector failed")
ErrTaskGetInfo = errors.New("dm_task_get_info failed")
ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed")
ErrTaskSetCookie = errors.New("dm_task_set_cookie failed")
ErrNilCookie = errors.New("cookie ptr can't be nil")
ErrAttachLoopbackDevice = errors.New("loopback mounting failed")
@@ -86,7 +85,7 @@ type (
func (t *Task) destroy() {
if t != nil {
DmTaskDestory(t.unmanaged)
DmTaskDestroy(t.unmanaged)
runtime.SetFinalizer(t, nil)
}
}
@@ -180,45 +179,37 @@ func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64,
start, length, targetType, params
}
func AttachLoopDevice(filename string) (*os.File, error) {
var fd int
res := DmAttachLoopDevice(filename, &fd)
if res == "" {
return nil, ErrAttachLoopbackDevice
}
return os.NewFile(uintptr(fd), res), nil
}
func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
dev, inode, err := dmGetLoopbackBackingFile(file.Fd())
if err != 0 {
func getLoopbackBackingFile(file *osFile) (uint64, uint64, error) {
loopInfo, err := ioctlLoopGetStatus64(file.Fd())
if err != nil {
utils.Errorf("Error get loopback backing file: %s\n", err)
return 0, 0, ErrGetLoopbackBackingFile
}
return dev, inode, nil
return loopInfo.loDevice, loopInfo.loInode, nil
}
func LoopbackSetCapacity(file *os.File) error {
err := dmLoopbackSetCapacity(file.Fd())
if err != 0 {
func LoopbackSetCapacity(file *osFile) error {
if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
utils.Errorf("Error loopbackSetCapacity: %s", err)
return ErrLoopbackSetCapacity
}
return nil
}
func FindLoopDeviceFor(file *os.File) *os.File {
func FindLoopDeviceFor(file *osFile) *osFile {
stat, err := file.Stat()
if err != nil {
return nil
}
targetInode := stat.Sys().(*syscall.Stat_t).Ino
targetDevice := stat.Sys().(*syscall.Stat_t).Dev
targetInode := stat.Sys().(*sysStatT).Ino
targetDevice := stat.Sys().(*sysStatT).Dev
for i := 0; true; i++ {
path := fmt.Sprintf("/dev/loop%d", i)
file, err := os.OpenFile(path, os.O_RDWR, 0)
file, err := osOpenFile(path, osORdWr, 0)
if err != nil {
if os.IsNotExist(err) {
if osIsNotExist(err) {
return nil
}
@@ -231,7 +222,6 @@ func FindLoopDeviceFor(file *os.File) *os.File {
if err == nil && dev == targetDevice && inode == targetInode {
return file
}
file.Close()
}
@@ -289,16 +279,17 @@ func RemoveDevice(name string) error {
return nil
}
func GetBlockDeviceSize(file *os.File) (uint64, error) {
size, errno := DmGetBlockSize(file.Fd())
if size == -1 || errno != 0 {
func GetBlockDeviceSize(file *osFile) (uint64, error) {
size, err := ioctlBlkGetSize64(file.Fd())
if err != nil {
utils.Errorf("Error getblockdevicesize: %s", err)
return 0, ErrGetBlockSize
}
return uint64(size), nil
}
// This is the programmatic example of "dmsetup create"
func createPool(poolName string, dataFile *os.File, metadataFile *os.File) error {
func createPool(poolName string, dataFile, metadataFile *osFile) error {
task, err := createTask(DeviceCreate, poolName)
if task == nil {
return err
@@ -328,7 +319,7 @@ func createPool(poolName string, dataFile *os.File, metadataFile *os.File) error
return nil
}
func reloadPool(poolName string, dataFile *os.File, metadataFile *os.File) error {
func reloadPool(poolName string, dataFile, metadataFile *osFile) error {
task, err := createTask(DeviceReload, poolName)
if task == nil {
return err
@@ -394,8 +385,8 @@ func getStatus(name string) (uint64, uint64, string, string, error) {
return 0, 0, "", "", fmt.Errorf("Non existing device %s", name)
}
_, start, length, target_type, params := task.GetNextTarget(0)
return start, length, target_type, params, nil
_, start, length, targetType, params := task.GetNextTarget(0)
return start, length, targetType, params, nil
}
func setTransactionId(poolName string, oldId uint64, newId uint64) error {
@@ -424,7 +415,7 @@ func suspendDevice(name string) error {
return err
}
if err := task.Run(); err != nil {
return fmt.Errorf("Error running DeviceSuspend")
return fmt.Errorf("Error running DeviceSuspend: %s", err)
}
return nil
}
@@ -441,7 +432,7 @@ func resumeDevice(name string) error {
}
if err := task.Run(); err != nil {
return fmt.Errorf("Error running DeviceSuspend")
return fmt.Errorf("Error running DeviceResume")
}
UdevWait(cookie)


@@ -0,0 +1,106 @@
package devmapper
// Definition of struct dm_task and sub structures (from lvm2)
//
// struct dm_ioctl {
// /*
// * The version number is made up of three parts:
// * major - no backward or forward compatibility,
// * minor - only backwards compatible,
// * patch - both backwards and forwards compatible.
// *
// * All clients of the ioctl interface should fill in the
// * version number of the interface that they were
// * compiled with.
// *
// * All recognised ioctl commands (ie. those that don't
// * return -ENOTTY) fill out this field, even if the
// * command failed.
// */
// uint32_t version[3]; /* in/out */
// uint32_t data_size; /* total size of data passed in
// * including this struct */
// uint32_t data_start; /* offset to start of data
// * relative to start of this struct */
// uint32_t target_count; /* in/out */
// int32_t open_count; /* out */
// uint32_t flags; /* in/out */
// /*
// * event_nr holds either the event number (input and output) or the
// * udev cookie value (input only).
// * The DM_DEV_WAIT ioctl takes an event number as input.
// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls
// * use the field as a cookie to return in the DM_COOKIE
// * variable with the uevents they issue.
// * For output, the ioctls return the event number, not the cookie.
// */
// uint32_t event_nr; /* in/out */
// uint32_t padding;
// uint64_t dev; /* in/out */
// char name[DM_NAME_LEN]; /* device name */
// char uuid[DM_UUID_LEN]; /* unique identifier for
// * the block device */
// char data[7]; /* padding or data */
// };
// struct target {
// uint64_t start;
// uint64_t length;
// char *type;
// char *params;
// struct target *next;
// };
// typedef enum {
// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */
// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */
// } dm_add_node_t;
// struct dm_task {
// int type;
// char *dev_name;
// char *mangled_dev_name;
// struct target *head, *tail;
// int read_only;
// uint32_t event_nr;
// int major;
// int minor;
// int allow_default_major_fallback;
// uid_t uid;
// gid_t gid;
// mode_t mode;
// uint32_t read_ahead;
// uint32_t read_ahead_flags;
// union {
// struct dm_ioctl *v4;
// } dmi;
// char *newname;
// char *message;
// char *geometry;
// uint64_t sector;
// int no_flush;
// int no_open_count;
// int skip_lockfs;
// int query_inactive_table;
// int suppress_identical_reload;
// dm_add_node_t add_node;
// uint64_t existing_table_size;
// int cookie_set;
// int new_uuid;
// int secure_data;
// int retry_remove;
// int enable_checks;
// int expected_errno;
// char *uuid;
// char *mangled_uuid;
// };
//


@@ -1,3 +1,5 @@
// +build linux
package devmapper
import "C"


@@ -1,11 +1,13 @@
// +build linux
package devmapper
import (
"syscall"
"testing"
)
func TestTaskCreate(t *testing.T) {
t.Skip("FIXME: not a unit test")
// Test success
taskCreate(t, DeviceInfo)
@@ -18,6 +20,7 @@ func TestTaskCreate(t *testing.T) {
}
func TestTaskRun(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)
// Test success
@@ -46,6 +49,7 @@ func TestTaskRun(t *testing.T) {
}
func TestTaskSetName(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)
// Test success
@@ -63,6 +67,7 @@ func TestTaskSetName(t *testing.T) {
}
func TestTaskSetMessage(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)
// Test success
@@ -80,6 +85,7 @@ func TestTaskSetMessage(t *testing.T) {
}
func TestTaskSetSector(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)
// Test success
@@ -97,6 +103,7 @@ func TestTaskSetSector(t *testing.T) {
}
func TestTaskSetCookie(t *testing.T) {
t.Skip("FIXME: not a unit test")
var (
cookie uint = 0
task = taskCreate(t, DeviceInfo)
@@ -121,6 +128,7 @@ func TestTaskSetCookie(t *testing.T) {
}
func TestTaskSetAddNode(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)
// Test success
@@ -142,6 +150,7 @@ func TestTaskSetAddNode(t *testing.T) {
}
func TestTaskSetRo(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)
// Test success
@@ -159,6 +168,7 @@ func TestTaskSetRo(t *testing.T) {
}
func TestTaskAddTarget(t *testing.T) {
t.Skip("FIXME: not a unit test")
task := taskCreate(t, DeviceInfo)
// Test success
@@ -247,10 +257,6 @@ func dmTaskAddTargetFail(task *CDmTask,
return -1
}
func dmTaskGetDriverVersionFail(task *CDmTask, version *string) int {
return -1
}
func dmTaskGetInfoFail(task *CDmTask, info *Info) int {
return -1
}
@@ -264,14 +270,10 @@ func dmAttachLoopDeviceFail(filename string, fd *int) string {
return ""
}
func sysGetBlockSizeFail(fd uintptr, size *uint64) syscall.Errno {
func sysGetBlockSizeFail(fd uintptr, size *uint64) sysErrno {
return 1
}
func dmGetBlockSizeFail(fd uintptr) int64 {
return -1
}
func dmUdevWaitFail(cookie uint) int {
return -1
}


@@ -1,125 +1,25 @@
// +build linux
package devmapper
/*
#cgo LDFLAGS: -L. -ldevmapper
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <libdevmapper.h>
#include <linux/loop.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <errno.h>
#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
#include <linux/fs.h> // FIXME: present only for BLKGETSIZE64, maybe we can remove it?
#ifndef LOOP_CTL_GET_FREE
#define LOOP_CTL_GET_FREE 0x4C82
#define LOOP_CTL_GET_FREE 0x4C82
#endif
// FIXME: this could easily be rewritten in go
char* attach_loop_device(const char *filename, int *loop_fd_out)
{
struct loop_info64 loopinfo = {0};
struct stat st;
char buf[64];
int i, loop_fd, fd, start_index;
char* loopname;
*loop_fd_out = -1;
start_index = 0;
fd = open("/dev/loop-control", O_RDONLY);
if (fd >= 0) {
start_index = ioctl(fd, LOOP_CTL_GET_FREE);
close(fd);
if (start_index < 0)
start_index = 0;
}
fd = open(filename, O_RDWR);
if (fd < 0) {
perror("open");
return NULL;
}
loop_fd = -1;
for (i = start_index ; loop_fd < 0 ; i++ ) {
if (sprintf(buf, "/dev/loop%d", i) < 0) {
close(fd);
return NULL;
}
if (stat(buf, &st)) {
if (!S_ISBLK(st.st_mode)) {
fprintf(stderr, "[error] Loopback device %s is not a block device.\n", buf);
} else if (errno == ENOENT) {
fprintf(stderr, "[error] There are no more loopback device available.\n");
} else {
fprintf(stderr, "[error] Unkown error trying to stat the loopback device %s (errno: %d).\n", buf, errno);
}
close(fd);
return NULL;
}
loop_fd = open(buf, O_RDWR);
if (loop_fd < 0 && errno == ENOENT) {
fprintf(stderr, "[error] The loopback device %s does not exists.\n", buf);
close(fd);
return NULL;
} else if (loop_fd < 0) {
fprintf(stderr, "[error] Unkown error openning the loopback device %s. (errno: %d)\n", buf, errno);
continue;
}
if (ioctl(loop_fd, LOOP_SET_FD, (void *)(size_t)fd) < 0) {
int errsv = errno;
close(loop_fd);
loop_fd = -1;
if (errsv != EBUSY) {
close(fd);
fprintf(stderr, "cannot set up loopback device %s: %s", buf, strerror(errsv));
return NULL;
}
continue;
}
close(fd);
strncpy((char*)loopinfo.lo_file_name, buf, LO_NAME_SIZE);
loopinfo.lo_offset = 0;
loopinfo.lo_flags = LO_FLAGS_AUTOCLEAR;
if (ioctl(loop_fd, LOOP_SET_STATUS64, &loopinfo) < 0) {
perror("ioctl LOOP_SET_STATUS64");
if (ioctl(loop_fd, LOOP_CLR_FD, 0) < 0) {
perror("ioctl LOOP_CLR_FD");
}
close(loop_fd);
fprintf (stderr, "cannot set up loopback device info");
return (NULL);
}
loopname = strdup(buf);
if (loopname == NULL) {
close(loop_fd);
return (NULL);
}
*loop_fd_out = loop_fd;
return (loopname);
}
return (NULL);
}
#ifndef LO_FLAGS_PARTSCAN
#define LO_FLAGS_PARTSCAN 8
#endif
// FIXME: Can't we find a way to do the logging in pure Go?
extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str);
static void log_cb(int level, const char *file, int line,
int dm_errno_or_class, const char *f, ...)
static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...)
{
char buffer[256];
va_list ap;
@@ -135,40 +35,72 @@ static void log_with_errno_init()
{
dm_log_with_errno_init(log_cb);
}
*/
import "C"
import (
"syscall"
"unsafe"
)
type (
CDmTask C.struct_dm_task
CLoopInfo64 C.struct_loop_info64
LoopInfo64 struct {
loDevice uint64 /* ioctl r/o */
loInode uint64 /* ioctl r/o */
loRdevice uint64 /* ioctl r/o */
loOffset uint64
loSizelimit uint64 /* bytes, 0 == max available */
loNumber uint32 /* ioctl r/o */
loEncrypt_type uint32
loEncrypt_key_size uint32 /* ioctl w/o */
loFlags uint32 /* ioctl r/o */
loFileName [LoNameSize]uint8
loCryptName [LoNameSize]uint8
loEncryptKey [LoKeySize]uint8 /* ioctl w/o */
loInit [2]uint64
}
)
// IOCTL consts
const (
BlkGetSize64 = C.BLKGETSIZE64
LoopSetFd = C.LOOP_SET_FD
LoopCtlGetFree = C.LOOP_CTL_GET_FREE
LoopGetStatus64 = C.LOOP_GET_STATUS64
LoopSetStatus64 = C.LOOP_SET_STATUS64
LoopClrFd = C.LOOP_CLR_FD
LoopSetCapacity = C.LOOP_SET_CAPACITY
)
const (
LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR
LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY
LoFlagsPartScan = C.LO_FLAGS_PARTSCAN
LoKeySize = C.LO_KEY_SIZE
LoNameSize = C.LO_NAME_SIZE
)
var (
DmTaskDestory = dmTaskDestroyFct
DmTaskCreate = dmTaskCreateFct
DmTaskRun = dmTaskRunFct
DmTaskSetName = dmTaskSetNameFct
DmTaskSetMessage = dmTaskSetMessageFct
DmTaskSetSector = dmTaskSetSectorFct
DmTaskSetCookie = dmTaskSetCookieFct
DmTaskSetAddNode = dmTaskSetAddNodeFct
DmTaskSetRo = dmTaskSetRoFct
DmTaskAddTarget = dmTaskAddTargetFct
DmTaskGetInfo = dmTaskGetInfoFct
DmGetLibraryVersion = dmGetLibraryVersionFct
DmGetNextTarget = dmGetNextTargetFct
DmGetBlockSize = dmGetBlockSizeFct
DmAttachLoopDevice = dmAttachLoopDeviceFct
DmUdevWait = dmUdevWaitFct
DmLogInitVerbose = dmLogInitVerboseFct
DmSetDevDir = dmSetDevDirFct
DmGetLibraryVersion = dmGetLibraryVersionFct
DmTaskAddTarget = dmTaskAddTargetFct
DmTaskCreate = dmTaskCreateFct
DmTaskDestroy = dmTaskDestroyFct
DmTaskGetInfo = dmTaskGetInfoFct
DmTaskRun = dmTaskRunFct
DmTaskSetAddNode = dmTaskSetAddNodeFct
DmTaskSetCookie = dmTaskSetCookieFct
DmTaskSetMessage = dmTaskSetMessageFct
DmTaskSetName = dmTaskSetNameFct
DmTaskSetRo = dmTaskSetRoFct
DmTaskSetSector = dmTaskSetSectorFct
DmUdevWait = dmUdevWaitFct
LogWithErrnoInit = logWithErrnoInitFct
GetBlockSize = getBlockSizeFct
)
func free(p *C.char) {
@@ -184,28 +116,26 @@ func dmTaskCreateFct(taskType int) *CDmTask {
}
func dmTaskRunFct(task *CDmTask) int {
return int(C.dm_task_run((*C.struct_dm_task)(task)))
ret, _ := C.dm_task_run((*C.struct_dm_task)(task))
return int(ret)
}
func dmTaskSetNameFct(task *CDmTask, name string) int {
Cname := C.CString(name)
defer free(Cname)
return int(C.dm_task_set_name((*C.struct_dm_task)(task),
Cname))
return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname))
}
func dmTaskSetMessageFct(task *CDmTask, message string) int {
Cmessage := C.CString(message)
defer free(Cmessage)
return int(C.dm_task_set_message((*C.struct_dm_task)(task),
Cmessage))
return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage))
}
func dmTaskSetSectorFct(task *CDmTask, sector uint64) int {
return int(C.dm_task_set_sector((*C.struct_dm_task)(task),
C.uint64_t(sector)))
return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector)))
}
func dmTaskSetCookieFct(task *CDmTask, cookie *uint, flags uint16) int {
@@ -213,13 +143,11 @@ func dmTaskSetCookieFct(task *CDmTask, cookie *uint, flags uint16) int {
defer func() {
*cookie = uint(cCookie)
}()
return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie,
C.uint16_t(flags)))
return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags)))
}
func dmTaskSetAddNodeFct(task *CDmTask, addNode AddNodeType) int {
return int(C.dm_task_set_add_node((*C.struct_dm_task)(task),
C.dm_add_node_t(addNode)))
return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode)))
}
func dmTaskSetRoFct(task *CDmTask) int {
@@ -235,27 +163,7 @@ func dmTaskAddTargetFct(task *CDmTask,
Cparams := C.CString(params)
defer free(Cparams)
return int(C.dm_task_add_target((*C.struct_dm_task)(task),
C.uint64_t(start), C.uint64_t(size), Cttype, Cparams))
}
func dmGetLoopbackBackingFile(fd uintptr) (uint64, uint64, syscall.Errno) {
var lo64 C.struct_loop_info64
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, C.LOOP_GET_STATUS64,
uintptr(unsafe.Pointer(&lo64)))
return uint64(lo64.lo_device), uint64(lo64.lo_inode), err
}
func dmLoopbackSetCapacity(fd uintptr) syscall.Errno {
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, C.LOOP_SET_CAPACITY, 0)
return err
}
func dmGetBlockSizeFct(fd uintptr) (int64, syscall.Errno) {
var size int64
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, C.BLKGETSIZE64,
uintptr(unsafe.Pointer(&size)))
return size, err
return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams))
}
func dmTaskGetInfoFct(task *CDmTask, info *Info) int {
@@ -275,9 +183,7 @@ func dmTaskGetInfoFct(task *CDmTask, info *Info) int {
return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo))
}
func dmGetNextTargetFct(task *CDmTask, next uintptr, start, length *uint64,
target, params *string) uintptr {
func dmGetNextTargetFct(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr {
var (
Cstart, Clength C.uint64_t
CtargetType, Cparams *C.char
@@ -288,31 +194,11 @@ func dmGetNextTargetFct(task *CDmTask, next uintptr, start, length *uint64,
*target = C.GoString(CtargetType)
*params = C.GoString(Cparams)
}()
nextp := C.dm_get_next_target((*C.struct_dm_task)(task),
unsafe.Pointer(next), &Cstart, &Clength, &CtargetType, &Cparams)
nextp := C.dm_get_next_target((*C.struct_dm_task)(task), unsafe.Pointer(next), &Cstart, &Clength, &CtargetType, &Cparams)
return uintptr(nextp)
}
func dmAttachLoopDeviceFct(filename string, fd *int) string {
cFilename := C.CString(filename)
defer free(cFilename)
var cFd C.int
defer func() {
*fd = int(cFd)
}()
ret := C.attach_loop_device(cFilename, &cFd)
defer free(ret)
return C.GoString(ret)
}
func getBlockSizeFct(fd uintptr, size *uint64) syscall.Errno {
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, C.BLKGETSIZE64,
uintptr(unsafe.Pointer(&size)))
return err
}
func dmUdevWaitFct(cookie uint) int {
return int(C.dm_udev_wait(C.uint32_t(cookie)))
}


@@ -1,10 +1,11 @@
// +build linux
package devmapper
import (
"fmt"
"github.com/dotcloud/docker/graphdriver"
"io/ioutil"
"os"
"path"
)
@@ -22,7 +23,7 @@ type Driver struct {
home string
}
func Init(home string) (graphdriver.Driver, error) {
var Init = func(home string) (graphdriver.Driver, error) {
deviceSet, err := NewDeviceSet(home, true)
if err != nil {
return nil, err
@@ -57,7 +58,7 @@ func (d *Driver) Cleanup() error {
return d.DeviceSet.Shutdown()
}
func (d *Driver) Create(id string, parent string) error {
func (d *Driver) Create(id, parent string) error {
if err := d.DeviceSet.AddDevice(id, parent); err != nil {
return err
}
@@ -67,7 +68,7 @@ func (d *Driver) Create(id string, parent string) error {
return err
}
if err := os.MkdirAll(path.Join(mp, "rootfs"), 0755); err != nil && !os.IsExist(err) {
if err := osMkdirAll(path.Join(mp, "rootfs"), 0755); err != nil && !osIsExist(err) {
return err
}
@@ -98,7 +99,7 @@ func (d *Driver) Get(id string) (string, error) {
func (d *Driver) mount(id, mountPoint string) error {
// Create the target directories if they don't exist
if err := os.MkdirAll(mountPoint, 0755); err != nil && !os.IsExist(err) {
if err := osMkdirAll(mountPoint, 0755); err != nil && !osIsExist(err) {
return err
}
// If mountpoint is already mounted, do nothing
@@ -121,3 +122,7 @@ func (d *Driver) unmount(id, mountPoint string) error {
// Unmount the device
return d.DeviceSet.UnmountDevice(id, mountPoint, true)
}
func (d *Driver) Exists(id string) bool {
return d.Devices[id] != nil
}
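The switch from func Init to var Init above is what lets the tests stub the whole driver. A minimal sketch of the swap, inside a test (the test file's fakeInit/restoreInit do exactly this):
oldInit := Init
Init = func(home string) (graphdriver.Driver, error) {
    return &Driver{home: home}, nil // no device-mapper calls at all
}
defer func() { Init = oldInit }()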


@@ -1,9 +1,15 @@
// +build linux
package devmapper
import (
"fmt"
"github.com/dotcloud/docker/graphdriver"
"io/ioutil"
"os"
"path"
"runtime"
"strings"
"syscall"
"testing"
)
@@ -12,7 +18,96 @@ func init() {
DefaultDataLoopbackSize = 300 * 1024 * 1024
DefaultMetaDataLoopbackSize = 200 * 1024 * 1024
DefaultBaseFsSize = 300 * 1024 * 1024
}
// denyAllDevmapper mocks all calls to libdevmapper in the unit tests, and denies them by default
func denyAllDevmapper() {
// Hijack all calls to libdevmapper with default panics.
// Authorized calls are selectively hijacked in each test.
DmTaskCreate = func(t int) *CDmTask {
panic("DmTaskCreate: this method should not be called here")
}
DmTaskRun = func(task *CDmTask) int {
panic("DmTaskRun: this method should not be called here")
}
DmTaskSetName = func(task *CDmTask, name string) int {
panic("DmTaskSetName: this method should not be called here")
}
DmTaskSetMessage = func(task *CDmTask, message string) int {
panic("DmTaskSetMessage: this method should not be called here")
}
DmTaskSetSector = func(task *CDmTask, sector uint64) int {
panic("DmTaskSetSector: this method should not be called here")
}
DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int {
panic("DmTaskSetCookie: this method should not be called here")
}
DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int {
panic("DmTaskSetAddNode: this method should not be called here")
}
DmTaskSetRo = func(task *CDmTask) int {
panic("DmTaskSetRo: this method should not be called here")
}
DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int {
panic("DmTaskAddTarget: this method should not be called here")
}
DmTaskGetInfo = func(task *CDmTask, info *Info) int {
panic("DmTaskGetInfo: this method should not be called here")
}
DmGetNextTarget = func(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr {
panic("DmGetNextTarget: this method should not be called here")
}
DmUdevWait = func(cookie uint) int {
panic("DmUdevWait: this method should not be called here")
}
DmSetDevDir = func(dir string) int {
panic("DmSetDevDir: this method should not be called here")
}
DmGetLibraryVersion = func(version *string) int {
panic("DmGetLibraryVersion: this method should not be called here")
}
DmLogInitVerbose = func(level int) {
panic("DmLogInitVerbose: this method should not be called here")
}
DmTaskDestroy = func(task *CDmTask) {
panic("DmTaskDestroy: this method should not be called here")
}
LogWithErrnoInit = func() {
panic("LogWithErrnoInit: this method should not be called here")
}
}
func denyAllSyscall() {
sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) {
panic("sysMount: this method should not be called here")
}
sysUnmount = func(target string, flags int) (err error) {
panic("sysUnmount: this method should not be called here")
}
sysCloseOnExec = func(fd int) {
panic("sysCloseOnExec: this method should not be called here")
}
sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
panic("sysSyscall: this method should not be called here")
}
// Not a syscall, but forbidding it here anyway
Mounted = func(mnt string) (bool, error) {
panic("devmapper.Mounted: this method should not be called here")
}
// osOpenFile = os.OpenFile
// osNewFile = os.NewFile
// osCreate = os.Create
// osStat = os.Stat
// osIsNotExist = os.IsNotExist
// osIsExist = os.IsExist
// osMkdirAll = os.MkdirAll
// osRemoveAll = os.RemoveAll
// osRename = os.Rename
// osReadlink = os.Readlink
// execRun = func(name string, args ...string) error {
// return exec.Command(name, args...).Run()
// }
}
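The deny-all helpers implement a simple hijack pattern: every libdevmapper and syscall entry point is a package-level variable, so a test first makes everything panic, then re-authorizes only the calls it expects. A hypothetical minimal test using it:
func TestOnlyUnmountAllowed(t *testing.T) {
    denyAllSyscall()
    defer denyAllSyscall() // leave everything denied for the next test
    unmounted := false
    sysUnmount = func(target string, flags int) error {
        unmounted = true
        return nil
    }
    if err := sysUnmount("/some/mountpoint", 0); err != nil || !unmounted {
        t.Fatal("expected the stubbed sysUnmount to be called")
    }
}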
func mkTestDirectory(t *testing.T) string {
@@ -34,72 +129,534 @@ func newDriver(t *testing.T) *Driver {
func cleanup(d *Driver) {
d.Cleanup()
os.RemoveAll(d.home)
osRemoveAll(d.home)
}
type Set map[string]bool
func (r Set) Assert(t *testing.T, names ...string) {
for _, key := range names {
if _, exists := r[key]; !exists {
t.Fatalf("Key not set: %s", key)
}
delete(r, key)
}
if len(r) != 0 {
t.Fatalf("Unexpected keys: %v", r)
}
}
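Usage sketch for Set (hypothetical, inside a test): the mocks record each call by name, and Assert both checks for the expected keys and fails on any extras, consuming the set as it goes.
calls := make(Set)
calls["DmTaskCreate"] = true
calls["DmTaskRun"] = true
calls.Assert(t, "DmTaskCreate", "DmTaskRun") // passes and empties the set
calls.Assert(t)                              // passes again: nothing left over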
func TestInit(t *testing.T) {
home := mkTestDirectory(t)
defer os.RemoveAll(home)
driver, err := Init(home)
if err != nil {
t.Fatal(err)
}
defer func() {
if err := driver.Cleanup(); err != nil {
var (
calls = make(Set)
taskMessages = make(Set)
taskTypes = make(Set)
home = mkTestDirectory(t)
)
defer osRemoveAll(home)
func() {
denyAllDevmapper()
DmSetDevDir = func(dir string) int {
calls["DmSetDevDir"] = true
expectedDir := "/dev"
if dir != expectedDir {
t.Fatalf("Wrong libdevmapper call\nExpected: DmSetDevDir(%v)\nReceived: DmSetDevDir(%v)\n", expectedDir, dir)
}
return 0
}
LogWithErrnoInit = func() {
calls["DmLogWithErrnoInit"] = true
}
var task1 CDmTask
DmTaskCreate = func(taskType int) *CDmTask {
calls["DmTaskCreate"] = true
taskTypes[fmt.Sprintf("%d", taskType)] = true
return &task1
}
DmTaskSetName = func(task *CDmTask, name string) int {
calls["DmTaskSetName"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetName(%v)\nReceived: DmTaskSetName(%v)\n", expectedTask, task)
}
// FIXME: use Set.AssertRegexp()
if !strings.HasPrefix(name, "docker-") && !strings.HasPrefix(name, "/dev/mapper/docker-") ||
!strings.HasSuffix(name, "-pool") && !strings.HasSuffix(name, "-base") {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetName(%v)\nReceived: DmTaskSetName(%v)\n", "docker-...-pool", name)
}
return 1
}
DmTaskRun = func(task *CDmTask) int {
calls["DmTaskRun"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskRun(%v)\nReceived: DmTaskRun(%v)\n", expectedTask, task)
}
return 1
}
DmTaskGetInfo = func(task *CDmTask, info *Info) int {
calls["DmTaskGetInfo"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskGetInfo(%v)\nReceived: DmTaskGetInfo(%v)\n", expectedTask, task)
}
// This will crash if info is not dereferenceable
info.Exists = 0
return 1
}
DmTaskSetSector = func(task *CDmTask, sector uint64) int {
calls["DmTaskSetSector"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetSector(%v)\nReceived: DmTaskSetSector(%v)\n", expectedTask, task)
}
if expectedSector := uint64(0); sector != expectedSector {
t.Fatalf("Wrong libdevmapper call to DmTaskSetSector\nExpected: %v\nReceived: %v\n", expectedSector, sector)
}
return 1
}
DmTaskSetMessage = func(task *CDmTask, message string) int {
calls["DmTaskSetMessage"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetSector(%v)\nReceived: DmTaskSetSector(%v)\n", expectedTask, task)
}
taskMessages[message] = true
return 1
}
DmTaskDestroy = func(task *CDmTask) {
calls["DmTaskDestroy"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task)
}
}
DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int {
calls["DmTaskSetTarget"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task)
}
if start != 0 {
t.Fatalf("Wrong start: %d != %d", start, 0)
}
if ttype != "thin" && ttype != "thin-pool" {
t.Fatalf("Wrong ttype: %s", ttype)
}
// Quick smoke test
if params == "" {
t.Fatalf("Params should not be empty")
}
return 1
}
fakeCookie := uint(4321)
DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int {
calls["DmTaskSetCookie"] = true
expectedTask := &task1
if task != expectedTask {
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task)
}
if flags != 0 {
t.Fatalf("Cookie flags should be 0 (not %x)", flags)
}
*cookie = fakeCookie
return 1
}
DmUdevWait = func(cookie uint) int {
calls["DmUdevWait"] = true
if cookie != fakeCookie {
t.Fatalf("Wrong cookie: %d != %d", cookie, fakeCookie)
}
return 1
}
DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int {
if addNode != AddNodeOnCreate {
t.Fatalf("Wrong AddNoteType: %v (expected %v)", addNode, AddNodeOnCreate)
}
calls["DmTaskSetAddNode"] = true
return 1
}
execRun = func(name string, args ...string) error {
calls["execRun"] = true
if name != "mkfs.ext4" {
t.Fatalf("Expected %s to be executed, not %s", "mkfs.ext4", name)
}
return nil
}
driver, err := Init(home)
if err != nil {
t.Fatal(err)
}
defer func() {
if err := driver.Cleanup(); err != nil {
t.Fatal(err)
}
}()
}()
// Put all tests in a function to make sure the garbage collection will
// occur.
id := "foo"
if err := driver.Create(id, ""); err != nil {
t.Fatal(err)
// Call GC to cleanup runtime.Finalizers
runtime.GC()
calls.Assert(t,
"DmSetDevDir",
"DmLogWithErrnoInit",
"DmTaskSetName",
"DmTaskRun",
"DmTaskGetInfo",
"DmTaskDestroy",
"execRun",
"DmTaskCreate",
"DmTaskSetTarget",
"DmTaskSetCookie",
"DmUdevWait",
"DmTaskSetSector",
"DmTaskSetMessage",
"DmTaskSetAddNode",
)
taskTypes.Assert(t, "0", "6", "17")
taskMessages.Assert(t, "create_thin 0", "set_transaction_id 0 1")
}
func fakeInit() func(home string) (graphdriver.Driver, error) {
oldInit := Init
Init = func(home string) (graphdriver.Driver, error) {
return &Driver{
home: home,
}, nil
}
dir, err := driver.Get(id)
if err != nil {
t.Fatal(err)
return oldInit
}
func restoreInit(init func(home string) (graphdriver.Driver, error)) {
Init = init
}
func mockAllDevmapper(calls Set) {
DmSetDevDir = func(dir string) int {
calls["DmSetDevDir"] = true
return 0
}
if st, err := os.Stat(dir); err != nil {
t.Fatal(err)
} else if !st.IsDir() {
t.Fatalf("Get(%V) did not return a directory", id)
LogWithErrnoInit = func() {
calls["DmLogWithErrnoInit"] = true
}
DmTaskCreate = func(taskType int) *CDmTask {
calls["DmTaskCreate"] = true
return &CDmTask{}
}
DmTaskSetName = func(task *CDmTask, name string) int {
calls["DmTaskSetName"] = true
return 1
}
DmTaskRun = func(task *CDmTask) int {
calls["DmTaskRun"] = true
return 1
}
DmTaskGetInfo = func(task *CDmTask, info *Info) int {
calls["DmTaskGetInfo"] = true
return 1
}
DmTaskSetSector = func(task *CDmTask, sector uint64) int {
calls["DmTaskSetSector"] = true
return 1
}
DmTaskSetMessage = func(task *CDmTask, message string) int {
calls["DmTaskSetMessage"] = true
return 1
}
DmTaskDestroy = func(task *CDmTask) {
calls["DmTaskDestroy"] = true
}
DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int {
calls["DmTaskSetTarget"] = true
return 1
}
DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int {
calls["DmTaskSetCookie"] = true
return 1
}
DmUdevWait = func(cookie uint) int {
calls["DmUdevWait"] = true
return 1
}
DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int {
calls["DmTaskSetAddNode"] = true
return 1
}
execRun = func(name string, args ...string) error {
calls["execRun"] = true
return nil
}
}
func TestDriverName(t *testing.T) {
d := newDriver(t)
defer cleanup(d)
denyAllDevmapper()
defer denyAllDevmapper()
oldInit := fakeInit()
defer restoreInit(oldInit)
d := newDriver(t)
if d.String() != "devicemapper" {
t.Fatalf("Expected driver name to be devicemapper got %s", d.String())
}
}
func TestDriverCreate(t *testing.T) {
d := newDriver(t)
defer cleanup(d)
denyAllDevmapper()
denyAllSyscall()
defer denyAllSyscall()
defer denyAllDevmapper()
if err := d.Create("1", ""); err != nil {
t.Fatal(err)
calls := make(Set)
mockAllDevmapper(calls)
sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) {
calls["sysMount"] = true
// FIXME: compare the exact source and target strings (inodes + devname)
if expectedSource := "/dev/mapper/docker-"; !strings.HasPrefix(source, expectedSource) {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedSource, source)
}
if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target)
}
if expectedFstype := "ext4"; fstype != expectedFstype {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFstype, fstype)
}
if expectedFlags := uintptr(3236757504); flags != expectedFlags {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags)
}
return nil
}
Mounted = func(mnt string) (bool, error) {
calls["Mounted"] = true
if !strings.HasPrefix(mnt, "/tmp/docker-test-devmapper-") || !strings.HasSuffix(mnt, "/mnt/1") {
t.Fatalf("Wrong mounted call\nExpected: Mounted(%v)\nReceived: Mounted(%v)\n", "/tmp/docker-test-devmapper-.../mnt/1", mnt)
}
return false, nil
}
sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
calls["sysSyscall"] = true
if trap != sysSysIoctl {
t.Fatalf("Unexpected syscall. Expecting SYS_IOCTL, received: %d", trap)
}
switch a2 {
case LoopSetFd:
calls["ioctl.loopsetfd"] = true
case LoopCtlGetFree:
calls["ioctl.loopctlgetfree"] = true
case LoopGetStatus64:
calls["ioctl.loopgetstatus"] = true
case LoopSetStatus64:
calls["ioctl.loopsetstatus"] = true
case LoopClrFd:
calls["ioctl.loopclrfd"] = true
case LoopSetCapacity:
calls["ioctl.loopsetcapacity"] = true
case BlkGetSize64:
calls["ioctl.blkgetsize"] = true
default:
t.Fatalf("Unexpected IOCTL. Received %d", a2)
}
return 0, 0, 0
}
func() {
d := newDriver(t)
calls.Assert(t,
"DmSetDevDir",
"DmLogWithErrnoInit",
"DmTaskSetName",
"DmTaskRun",
"DmTaskGetInfo",
"execRun",
"DmTaskCreate",
"DmTaskSetTarget",
"DmTaskSetCookie",
"DmUdevWait",
"DmTaskSetSector",
"DmTaskSetMessage",
"DmTaskSetAddNode",
"sysSyscall",
"ioctl.blkgetsize",
"ioctl.loopsetfd",
"ioctl.loopsetstatus",
)
if err := d.Create("1", ""); err != nil {
t.Fatal(err)
}
calls.Assert(t,
"DmTaskCreate",
"DmTaskGetInfo",
"sysMount",
"Mounted",
"DmTaskRun",
"DmTaskSetTarget",
"DmTaskSetSector",
"DmTaskSetCookie",
"DmUdevWait",
"DmTaskSetName",
"DmTaskSetMessage",
"DmTaskSetAddNode",
)
}()
runtime.GC()
calls.Assert(t,
"DmTaskDestroy",
)
}
func TestDriverRemove(t *testing.T) {
d := newDriver(t)
defer cleanup(d)
denyAllDevmapper()
denyAllSyscall()
defer denyAllSyscall()
defer denyAllDevmapper()
if err := d.Create("1", ""); err != nil {
t.Fatal(err)
calls := make(Set)
mockAllDevmapper(calls)
sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) {
calls["sysMount"] = true
// FIXME: compare the exact source and target strings (inodes + devname)
if expectedSource := "/dev/mapper/docker-"; !strings.HasPrefix(source, expectedSource) {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedSource, source)
}
if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target)
}
if expectedFstype := "ext4"; fstype != expectedFstype {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFstype, fstype)
}
if expectedFlags := uintptr(3236757504); flags != expectedFlags {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags)
}
return nil
}
sysUnmount = func(target string, flags int) (err error) {
calls["sysUnmount"] = true
// FIXME: compare the exact source and target strings (inodes + devname)
if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target)
}
if expectedFlags := 0; flags != expectedFlags {
t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags)
}
return nil
}
Mounted = func(mnt string) (bool, error) {
calls["Mounted"] = true
return false, nil
}
if err := d.Remove("1"); err != nil {
t.Fatal(err)
sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
calls["sysSyscall"] = true
if trap != sysSysIoctl {
t.Fatalf("Unexpected syscall. Expecting SYS_IOCTL, received: %d", trap)
}
switch a2 {
case LoopSetFd:
calls["ioctl.loopsetfd"] = true
case LoopCtlGetFree:
calls["ioctl.loopctlgetfree"] = true
case LoopGetStatus64:
calls["ioctl.loopgetstatus"] = true
case LoopSetStatus64:
calls["ioctl.loopsetstatus"] = true
case LoopClrFd:
calls["ioctl.loopclrfd"] = true
case LoopSetCapacity:
calls["ioctl.loopsetcapacity"] = true
case BlkGetSize64:
calls["ioctl.blkgetsize"] = true
default:
t.Fatalf("Unexpected IOCTL. Received %d", a2)
}
return 0, 0, 0
}
func() {
d := newDriver(t)
calls.Assert(t,
"DmSetDevDir",
"DmLogWithErrnoInit",
"DmTaskSetName",
"DmTaskRun",
"DmTaskGetInfo",
"execRun",
"DmTaskCreate",
"DmTaskSetTarget",
"DmTaskSetCookie",
"DmUdevWait",
"DmTaskSetSector",
"DmTaskSetMessage",
"DmTaskSetAddNode",
"sysSyscall",
"ioctl.blkgetsize",
"ioctl.loopsetfd",
"ioctl.loopsetstatus",
)
if err := d.Create("1", ""); err != nil {
t.Fatal(err)
}
calls.Assert(t,
"DmTaskCreate",
"DmTaskGetInfo",
"sysMount",
"Mounted",
"DmTaskRun",
"DmTaskSetTarget",
"DmTaskSetSector",
"DmTaskSetCookie",
"DmUdevWait",
"DmTaskSetName",
"DmTaskSetMessage",
"DmTaskSetAddNode",
)
Mounted = func(mnt string) (bool, error) {
calls["Mounted"] = true
return true, nil
}
if err := d.Remove("1"); err != nil {
t.Fatal(err)
}
calls.Assert(t,
"DmTaskRun",
"DmTaskSetSector",
"DmTaskSetName",
"DmTaskSetMessage",
"DmTaskCreate",
"DmTaskGetInfo",
"Mounted",
"sysUnmount",
)
}()
runtime.GC()
calls.Assert(t,
"DmTaskDestroy",
)
}
func TestCleanup(t *testing.T) {
t.Skip("FIXME: not a unit test")
t.Skip("Unimplemented")
d := newDriver(t)
defer os.RemoveAll(d.home)
defer osRemoveAll(d.home)
mountPoints := make([]string, 2)
@@ -161,6 +718,7 @@ func TestCleanup(t *testing.T) {
}
func TestNotMounted(t *testing.T) {
t.Skip("FIXME: not a unit test")
t.Skip("Not implemented")
d := newDriver(t)
defer cleanup(d)
@@ -179,6 +737,7 @@ func TestNotMounted(t *testing.T) {
}
func TestMounted(t *testing.T) {
t.Skip("FIXME: not a unit test")
d := newDriver(t)
defer cleanup(d)
@@ -199,6 +758,7 @@ func TestMounted(t *testing.T) {
}
func TestInitCleanedDriver(t *testing.T) {
t.Skip("FIXME: not a unit test")
d := newDriver(t)
if err := d.Create("1", ""); err != nil {
@@ -225,6 +785,7 @@ func TestInitCleanedDriver(t *testing.T) {
}
func TestMountMountedDriver(t *testing.T) {
t.Skip("FIXME: not a unit test")
d := newDriver(t)
defer cleanup(d)
@@ -243,6 +804,7 @@ func TestMountMountedDriver(t *testing.T) {
}
func TestGetReturnsValidDevice(t *testing.T) {
t.Skip("FIXME: not a unit test")
d := newDriver(t)
defer cleanup(d)
@@ -268,6 +830,7 @@ func TestGetReturnsValidDevice(t *testing.T) {
}
func TestDriverGetSize(t *testing.T) {
t.Skip("FIXME: not a unit test")
t.Skipf("Size is currently not implemented")
d := newDriver(t)
@@ -284,7 +847,7 @@ func TestDriverGetSize(t *testing.T) {
size := int64(1024)
f, err := os.Create(path.Join(mountPoint, "test_file"))
f, err := osCreate(path.Join(mountPoint, "test_file"))
if err != nil {
t.Fatal(err)
}
@@ -301,3 +864,15 @@ func TestDriverGetSize(t *testing.T) {
// t.Fatalf("Expected size %d got %d", size, diffSize)
// }
}
func assertMap(t *testing.T, m map[string]bool, keys ...string) {
for _, key := range keys {
if _, exists := m[key]; !exists {
t.Fatalf("Key not set: %s", key)
}
delete(m, key)
}
if len(m) != 0 {
t.Fatalf("Unexpected keys: %v", m)
}
}


@@ -0,0 +1,60 @@
// +build linux
package devmapper
import (
"unsafe"
)
func ioctlLoopCtlGetFree(fd uintptr) (int, error) {
index, _, err := sysSyscall(sysSysIoctl, fd, LoopCtlGetFree, 0)
if err != 0 {
return 0, err
}
return int(index), nil
}
func ioctlLoopSetFd(loopFd, sparseFd uintptr) error {
if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetFd, sparseFd); err != 0 {
return err
}
return nil
}
func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *LoopInfo64) error {
if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
return err
}
return nil
}
func ioctlLoopClrFd(loopFd uintptr) error {
if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopClrFd, 0); err != 0 {
return err
}
return nil
}
func ioctlLoopGetStatus64(loopFd uintptr) (*LoopInfo64, error) {
loopInfo := &LoopInfo64{}
if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
return nil, err
}
return loopInfo, nil
}
func ioctlLoopSetCapacity(loopFd uintptr, value int) error {
if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetCapacity, uintptr(value)); err != 0 {
return err
}
return nil
}
func ioctlBlkGetSize64(fd uintptr) (int64, error) {
var size int64
if _, _, err := sysSyscall(sysSysIoctl, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 {
return 0, err
}
return size, nil
}
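A sketch of how these wrappers compose (assumed flow, mirroring ResizePool above: the sparse backing file is grown first, e.g. with File.Truncate, before asking the kernel to notice):
func refreshLoopSize(loopFile *osFile) (int64, error) {
    // LOOP_SET_CAPACITY makes the kernel re-read the backing file size
    if err := ioctlLoopSetCapacity(loopFile.Fd(), 0); err != nil {
        return 0, err
    }
    // BLKGETSIZE64 now reflects the new capacity
    return ioctlBlkGetSize64(loopFile.Fd())
}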


@@ -1,27 +1,27 @@
// +build linux
package devmapper
import (
"os"
"path/filepath"
"syscall"
)
// FIXME: this is copy-pasted from the aufs driver.
// It should be moved into the core.
func Mounted(mountpoint string) (bool, error) {
mntpoint, err := os.Stat(mountpoint)
var Mounted = func(mountpoint string) (bool, error) {
mntpoint, err := osStat(mountpoint)
if err != nil {
if os.IsNotExist(err) {
if osIsNotExist(err) {
return false, nil
}
return false, err
}
parent, err := os.Stat(filepath.Join(mountpoint, ".."))
parent, err := osStat(filepath.Join(mountpoint, ".."))
if err != nil {
return false, err
}
mntpointSt := mntpoint.Sys().(*syscall.Stat_t)
parentSt := parent.Sys().(*syscall.Stat_t)
mntpointSt := toSysStatT(mntpoint.Sys())
parentSt := toSysStatT(parent.Sys())
return mntpointSt.Dev != parentSt.Dev, nil
}

Some files were not shown because too many files have changed in this diff Show More