Compare commits


155 Commits

Author SHA1 Message Date
Solomon Hykes
5ffd63070f Bumped version to 0.2.2 2013-05-03 15:19:20 -07:00
Solomon Hykes
701132259d + Documentation: new example: sharing data between 2 couchdb databases 2013-05-03 15:13:12 -07:00
Solomon Hykes
18b8eeb484 + Support for data volumes 2013-05-03 13:03:47 -07:00
Solomon Hykes
a7c0e9a355 Fix a bug in the Makefile which caused dependency download to fail 2013-05-03 12:58:44 -07:00
Guillaume J. Charmes
19df5a7965 Merge pull request #487 from brunoqc/patch-2
* vagrant: Use only one deb line in /etc/apt
2013-05-03 11:55:16 -07:00
Guillaume J. Charmes
8ef72cbc94 Merge pull request #508 from bdon/master
* docs: doc fix
2013-05-03 11:41:14 -07:00
Brandon Liu
6cbe27b7a5 correct documentation for where images are stored on filesystem. 2013-05-02 20:37:08 -07:00
Solomon Hykes
a82b60b30d dockerbuilder: change order of dependencies 2013-05-02 19:22:41 -07:00
Solomon Hykes
c08d245539 dockerbuilder: let the Makefile upload to s3 with 'make release' 2013-05-02 18:11:54 -07:00
Solomon Hykes
b6a5e604ab Add s3 upload to 'make release' 2013-05-02 11:32:55 -07:00
Solomon Hykes
6e486b638b + Hack: 'make s3release' uploads a clean build to s3 2013-05-02 11:25:49 -07:00
Bruno Bigras
74cd7e822d Use only one deb line in /etc/apt
This prevents the script from filling up /etc/apt/sources.list with more than one deb line which cause a warning when updating.
2013-05-02 13:33:23 -04:00
Guillaume J. Charmes
21b9dcd518 Update docs for Command Run 2013-05-02 09:26:29 -07:00
Guillaume J. Charmes
897cc573f0 Fix the graph.Create prototype 2013-05-02 09:23:29 -07:00
Guillaume J. Charmes
b0459adc27 Comply to the new graph.Create() prototype 2013-05-02 09:14:23 -07:00
Guillaume J. Charmes
3edd14b8c2 Implement the data volume removal 2013-05-02 09:14:23 -07:00
Guillaume J. Charmes
4099a31304 Implement the -volumes-from in order to mount volumes from another container 2013-05-02 09:14:22 -07:00
Guillaume J. Charmes
6fb495bf6f Move the id of volumes to Container (instead of Container.Config) 2013-05-02 09:14:22 -07:00
Guillaume J. Charmes
faf8daa7c6 Switch back config to map[string]struct{} 2013-05-02 09:14:22 -07:00
Guillaume J. Charmes
8d9aaee60b Handle data volumes mount points 2013-05-02 09:14:22 -07:00
Guillaume J. Charmes
35d704c8a0 Change the volumes type to map[string]string to store both source and destination 2013-05-02 09:14:22 -07:00
Solomon Hykes
1df5f4094b docker run -v PATH: bind a new data volume to a container 2013-05-02 09:14:22 -07:00
unclejack
528da23d6a use Go 1.0.3 to build docker 2013-05-02 15:27:37 +03:00
Guillaume J. Charmes
ff5e238de9 Merge pull request #496 from dotcloud/480-vagrant-fix
- vagrant: Fix main Vagrantfile
2013-05-01 23:38:18 -07:00
Solomon Hykes
c63dce393e Merge pull request #492 from kencochrane/registry-api-doc
+ Registry: added the registry API to the docker docs
2013-05-01 23:00:58 -07:00
Solomon Hykes
d6a63132ef Merge branch 'origin/new-dockerbuilder' 2013-05-02 05:57:50 +00:00
Solomon Hykes
e7271cdaae dockerbuilder: fix permissions 2013-05-02 05:56:51 +00:00
Solomon Hykes
6ca3b151b1 * Hack: improve the way dockerbuilder is built 2013-05-01 22:05:36 -07:00
Daniel Mizyrycki
0d9475346f Fix main Vagrantfile 2013-05-01 18:49:31 -07:00
Solomon Hykes
71199f595d New Dockerfile operation: 'add' 2013-05-01 18:32:38 -07:00
Solomon Hykes
58b95878f1 - Hack: fix dockerbuilder to build feature branches 2013-05-02 01:16:23 +00:00
Guillaume J. Charmes
e431dc26f1 Merge pull request #482 from dotcloud/move_capabilitie_function
* runtime: Move the capabilities detection into a runtime method
2013-05-01 17:43:32 -07:00
Solomon Hykes
09b1cd58c0 Merge branch 'master' of ssh://github.com/dotcloud/docker 2013-05-01 17:18:05 -07:00
Solomon Hykes
d42639e5c5 Bumped version to 0.2.1 2013-05-01 17:17:13 -07:00
Guillaume J. Charmes
d0c2e31fb9 Merge pull request #495 from dotcloud/autorun_docs
* docs: Update commandline Commit doc
2013-05-01 17:14:53 -07:00
Guillaume J. Charmes
509a01bbe4 Merge pull request #493 from dotcloud/374-developer-environment
* hack: development; issue #374: Refactor 'make hack' making Vagrantfile and VM more useful
2013-05-01 16:54:29 -07:00
Solomon Hykes
e7fb7f13d5 new Dockerfile keyword: cmd to set a default runtime command 2013-05-01 16:43:37 -07:00
Daniel Mizyrycki
d172da58ce development; issue #374: Update VM documentation 2013-05-01 15:59:54 -07:00
Solomon Hykes
ad86dde10c * 'docker commit' inherits parent layer's run configuration by default 2013-05-01 15:45:39 -07:00
Guillaume J. Charmes
c20e46587d Update commandline Commit doc 2013-05-01 15:43:02 -07:00
Daniel Mizyrycki
eeb03164cf development; issue #374: Upgrade development VM box to Ubuntu-13.04 with kernel 3.8 2013-05-01 15:26:27 -07:00
Daniel Mizyrycki
bb61678b57 development; issue #374: Refactor 'make hack' making Vagrantfile and VM more useful 2013-05-01 15:26:27 -07:00
Guillaume J. Charmes
a75a1b3859 When no -config is set while committing, use the config of the base image 2013-05-01 15:24:28 -07:00
Solomon Hykes
08812096f5 New Dockerfile operation 'expose' exposes default tcp ports 2013-05-01 14:16:56 -07:00
Solomon Hykes
5c30faf6f7 Set a layer's default runtime options with 'docker commit -run' instead of 'docker commit -config' 2013-05-01 12:45:45 -07:00
Solomon Hykes
f7aaa06606 + Commit default runtime options with a layer 2013-05-01 11:33:21 -07:00
Guillaume J. Charmes
7ff65d40d5 Actually use the mergeConfig function 2013-05-01 11:22:06 -07:00
Ken Cochrane
904c2a0fc3 added the registry API to the docker docs 2013-05-01 12:31:46 -04:00
Solomon Hykes
a3ce90b78b Added dummy script for docker-build example 2013-05-01 00:49:28 -07:00
Solomon Hykes
03b83b3210 Fix example dockerfile 2013-05-01 00:44:36 -07:00
Solomon Hykes
40ccf1d300 new Dockerfile keyword: 'push' 2013-05-01 00:42:11 -07:00
Solomon Hykes
038ca5ee39 docker-build: added support for 'maintainer' keyword 2013-05-01 00:14:52 -07:00
Solomon Hykes
957c500ac9 Merge pull request #485 from brunoqc/patch-1
* Packaging: connect to Ubuntu key server on port 80
2013-04-30 15:46:57 -07:00
Guillaume J. Charmes
62a595da5c Merge pull request #488 from tobert/cgroups-via-proc-mounts
* runtime: Use /proc/mounts instead of mount(8)
2013-04-30 14:39:14 -07:00
Guillaume J. Charmes
d97661aa71 Improve crashTest 2013-04-30 11:16:26 -07:00
Al Tobey
c6119da339 Use /proc/mounts instead of mount(8)
Specifically, Ubuntu Precise's cgroup-lite script uses mount -n
to mount the cgroup filesystems so they don't appear in mtab, so
detection always fails unless the admin updates mtab with /proc/mounts.

/proc/mounts is valid on just about every Linux machine in existence and
as a bonus is much easier to parse.

I also removed the regex in favor of a more accurate parser that should
also support monolithic cgroup mounts (e.g. mount -t cgroup none /cgroup).
2013-04-30 17:37:43 +00:00
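
A minimal Go sketch of the /proc/mounts approach this commit describes: scan for filesystems of type cgroup and match the subsystem name in the mount options, which also covers monolithic mounts. The function name and details here are illustrative, not necessarily docker's exact implementation.

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// findCgroupMountpoint scans /proc/mounts for a cgroup filesystem that
// carries the given subsystem (e.g. "memory") in its mount options. This
// also handles monolithic mounts such as `mount -t cgroup none /cgroup`,
// where one mountpoint lists every subsystem in its options.
func findCgroupMountpoint(subsystem string) (string, error) {
	f, err := os.Open("/proc/mounts")
	if err != nil {
		return "", err
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		// /proc/mounts format: device mountpoint fstype options dump pass
		fields := strings.Fields(scanner.Text())
		if len(fields) < 4 || fields[2] != "cgroup" {
			continue
		}
		for _, opt := range strings.Split(fields[3], ",") {
			if opt == subsystem {
				return fields[1], nil
			}
		}
	}
	if err := scanner.Err(); err != nil {
		return "", err
	}
	return "", fmt.Errorf("cgroup mountpoint not found for %s", subsystem)
}

func main() {
	mp, err := findCgroupMountpoint("memory")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("memory cgroup mounted at", mp)
}
```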
Bruno Bigras
5051c20833 Use the 80 port with keyserver.ubuntu.com
Use the 80 port with keyserver.ubuntu.com so it works with corporate firewalls
2013-04-29 15:53:50 -03:00
Guillaume J. Charmes
cdc2657ee9 Improve crashTest 2013-04-28 07:10:58 -07:00
Guillaume J. Charmes
76a1a7cf5b Simplify the crashTest 2013-04-28 06:23:02 -07:00
Guillaume J. Charmes
20c2a4f80f add network endpoint for crashTest 2013-04-28 03:54:22 -07:00
Guillaume J. Charmes
ebe157ebb5 Update the crashTest to have the dockerpath in env 2013-04-28 01:27:56 -07:00
Solomon Hykes
cb431f223f Merge pull request #484 from tianon/mkimage-debian
* Contrib: updated mkimage-debian
2013-04-29 12:12:02 -07:00
Tianon Gravi
ab34115b42 Use default mirror from debootstrap when not explicitly provided, and add better target directory naming 2013-04-28 13:38:26 -06:00
Tianon Gravi
4b3354af3f Improve mkimage-debian script to also tag using the release version number of the final image (6.0.7, 7.0, etc.)
This is as discussed on #447.
2013-04-28 12:31:28 -06:00
Guillaume J. Charmes
9042535f5a Move the capabilities detection into a runtime method 2013-04-26 14:32:55 -07:00
Guillaume J. Charmes
8f81e175af Merge pull request #473 from dotcloud/26-auto_restart_containers-feature
+ runtime: Add -r flag to dockerd in order to restart previously running container....
2013-04-26 14:02:01 -07:00
Guillaume J. Charmes
636c7835d3 Merge pull request #467 from dotcloud/improve_localhost_port_test
* tests: Improve unit test to avoid unnecessary warnings
2013-04-26 14:01:13 -07:00
Guillaume J. Charmes
6d1dd8b41a Merge pull request #478 from tianon/mkimage-debian
+ contrib: Add contrib/mkimage-debian.sh used to create the tianon/debian images
2013-04-26 13:51:47 -07:00
Guillaume J. Charmes
ae97477284 Remove -command in CmdCommit and make -config use Json 2013-04-26 10:48:33 -07:00
Tianon Gravi
86ad98e72a Add contrib/mkimage-debian.sh used to create the tianon/debian images 2013-04-26 08:54:29 -06:00
Solomon Hykes
03d82922aa Merge pull request #474 from brianm/vmware_fusion_provider
Support for VMWare Fusion Provider in Vagrantfile
2013-04-26 01:31:11 -07:00
Guillaume J. Charmes
30d327d37e Add TestCommitAutoRun 2013-04-25 17:03:13 -07:00
Guillaume J. Charmes
724e2d6b0a Update unit test in order to comply with new api 2013-04-25 17:02:38 -07:00
Guillaume J. Charmes
51d6228261 Implement -config and -command in CmdCommit in order to allow autorun 2013-04-25 16:48:31 -07:00
Brian McCallister
4db680fda4 don't fight the box kernel version, not worth it 2013-04-25 06:29:13 -06:00
Brian McCallister
9c7293508d get aufs dependencies into vmware image 2013-04-25 06:09:04 -06:00
Brian McCallister
9d8743a7ae vmware fusion provider config 2013-04-25 05:59:31 -06:00
Guillaume J. Charmes
50144aeb42 Add -r flag to dockerd in order to restart previously running container. Fixes #26 2013-04-24 19:01:23 -07:00
Guillaume J. Charmes
ee298d1420 Specify a different bridge for tests than for regular runtime 2013-04-24 17:43:41 -07:00
Solomon Hykes
03855b0027 Merge pull request #466 from dotcloud/441-vagrant-improve
* Packaging: simplify Vagrantfile
2013-04-24 17:25:20 -07:00
Daniel Mizyrycki
2726e3649a vagrant; issue #441: Improve main config including aws ubuntu lts dependency 2013-04-24 11:30:15 -07:00
Solomon Hykes
90668a8a99 Bumped version to 0.2.0 2013-04-23 23:15:09 -07:00
Solomon Hykes
c7fd84b8a0 Merge branch 'master' of ssh://github.com/dotcloud/docker 2013-04-23 23:05:38 -07:00
Solomon Hykes
874a40ed3a - Dev: dockerbuilder requires a fake initctl because 'apt-get install devscripts' insists on installing a stupid daemon I never asked for in the first place. 2013-04-23 23:04:54 -07:00
Solomon Hykes
370fafacbf Merge branch 'master' of ssh://github.com/dotcloud/docker 2013-04-23 22:57:50 -07:00
Solomon Hykes
a0478f726d dockerbuilder: upload most recent Ubuntu package (note version FOO might not yet be packaged at tag vFOO) 2013-04-23 22:57:34 -07:00
Solomon Hykes
e5bc5a2e31 Merge pull request #427 from dhrp/docs
- Packaging: Fixed Vagrantfile
* Documentation: Updated install instructions
2013-04-23 19:49:28 -07:00
Solomon Hykes
25fc3a7e76 Merge pull request #470 from dotcloud/packaging-ubuntu
* Packaging: Add 0.1.8 to Ubuntu packaging changelog
* Packaging: Update the Ubuntu maintainer manual
2013-04-23 19:46:56 -07:00
Solomon Hykes
b3ab0b561e Makefile improvements
+ Convenience rules: srcrelease, deps
	- Separate dependency vendoring from building the binary
	  (re-download dependencies with 'make deps')
2013-04-23 19:41:38 -07:00
Solomon Hykes
8b8c8bf7cb Fix 'make release RELEASE_VERSION=master' 2013-04-23 18:50:53 -07:00
Solomon Hykes
a8651a23b2 make release: build a binary release of the most recent version tag 2013-04-23 18:32:59 -07:00
Daniel Mizyrycki
f744cfd5a7 packaging-ubuntu: update maintainer documentation for changelog file 2013-04-23 13:51:03 -07:00
Solomon Hykes
e03b241fb1 dockerbuilder: build with 'make; cp -R ./bin' 2013-04-23 12:07:54 -07:00
Thatcher Peskens
1ddca1948b Fixed remaining issues and conflicts created by last merge. 2013-04-23 12:04:53 -07:00
Solomon Hykes
2485bb2cd2 dockerbuilder: use a pristine GOPATH, with the fresh checkout registered at the right path (for internal submodules) 2013-04-23 11:45:47 -07:00
Guillaume J. Charmes
6ebb249131 Remove unnecessary memory limit within tests 2013-04-23 11:25:16 -07:00
Guillaume J. Charmes
c45beabcd5 Improve TestMultipleAttachRestart to avoid unnecessary warning 2013-04-23 11:22:30 -07:00
Guillaume J. Charmes
a22c78523f Wait for the container to finish in TestAttachDisconnect before destroying it 2013-04-23 11:09:48 -07:00
Guillaume J. Charmes
5a02c9ba0a Make sure the container is well started prior to perform the test 2013-04-23 11:08:31 -07:00
Solomon Hykes
7577f48dc4 dockerbuilder: build in current directory instead /go and /tmp 2013-04-23 10:53:02 -07:00
Solomon Hykes
0512cf9c83 dockerbuilder: /usr/local/bin is already set by docker 2013-04-23 10:49:58 -07:00
Solomon Hykes
73da7a12e7 Increased timeout in TCP port allocation test to pass on slower machines 2013-04-23 10:12:46 -07:00
Solomon Hykes
50f5723f1d Merge pull request #465 from shamrin/patch-2
- Documentation: fixed typo in "Building blocks"
2013-04-23 09:02:19 -07:00
Alexey Shamrin
cbc4eccd50 fixed typo in buildingblocks.rst 2013-04-23 12:52:55 +04:00
Solomon Hykes
cff26b3a6c Merge pull request #464 from tianon/patch-1
- Runtime: adapt cgroup capability detection to work on Gentoo
2013-04-23 00:36:10 -07:00
Solomon Hykes
329c3e0ffd Merge pull request #462 from dotcloud/initial_changelog
+ Add initial Changelog
2013-04-23 00:32:25 -07:00
Solomon Hykes
4f6cc5c733 Completed Changelog for all past versions 2013-04-23 00:30:18 -07:00
Tianon Gravi
e413340723 Update FindCgroupMountpoint to be more forgiving
On Gentoo, the memory cgroup is mounted at /sys/fs/cgroup/memory, but the mount line looks like the following:
memory on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory)

(note that the first word on the line is "memory", not "cgroup", but the other essentials are there, namely the type of cgroup and the memory mount option)
2013-04-23 01:09:29 -06:00
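
By contrast, a forgiving parse of mount(8)-style output along the lines this commit describes keys off the "on" and "type" keywords rather than the first word on the line. A hedged sketch, not docker's exact FindCgroupMountpoint:

```go
package main

import (
	"fmt"
	"strings"
)

// mountpointFor scans `mount` output for a cgroup filesystem whose options
// include the given subsystem. It does not require the first word to be
// "cgroup": on Gentoo that word is the subsystem name itself.
func mountpointFor(mountOutput, subsystem string) string {
	for _, line := range strings.Split(mountOutput, "\n") {
		// expected shape: <src> on <mountpoint> type <fstype> (<options>)
		fields := strings.Fields(line)
		if len(fields) < 6 || fields[1] != "on" || fields[3] != "type" || fields[4] != "cgroup" {
			continue
		}
		for _, opt := range strings.Split(strings.Trim(fields[5], "()"), ",") {
			if opt == subsystem {
				return fields[2]
			}
		}
	}
	return ""
}

func main() {
	out := "memory on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory)"
	fmt.Println(mountpointFor(out, "memory")) // /sys/fs/cgroup/memory
}
```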
Solomon Hykes
95e066d24f - Runtime: ghost containers can be killed. 2013-04-22 22:30:33 -07:00
Solomon Hykes
82b8f7a565 hack/dockerbuilder: a standard build environment for building and uploading official binary builds of docker... inside docker 2013-04-22 22:29:12 -07:00
Solomon Hykes
97badbd29e Bumped version to 0.1.8 2013-04-22 22:04:57 -07:00
Thatcher Peskens
5a5e417d46 Merge remote-tracking branch 'dotcloud/master' into dhrp/docs
Conflicts:
	docker/docker.go
	docs/sources/installation/archlinux.rst
	docs/sources/installation/index.rst
	docs/sources/installation/ubuntulinux.rst
	runtime.go
	utils.go
2013-04-22 18:44:50 -07:00
Thatcher Peskens
4031a01af1 Merged changes 2013-04-22 18:38:42 -07:00
Guillaume J. Charmes
0b0d958b88 Merge pull request #463 from dotcloud/improve_pid_file_feature
Check that the pid in pidfile exists before preventing docker from starting
2013-04-22 18:24:03 -07:00
Guillaume J. Charmes
03e4704ae5 Merge pull request #442 from dotcloud/fix_deleted_file_diff
Use aufs to handle parents whiteouts instead of doing it manually
2013-04-22 18:23:46 -07:00
Guillaume J. Charmes
7a8ac76299 Merge pull request #456 from dotcloud/453-generic_kernel_detection-fix
453 generic kernel detection fix
2013-04-22 18:20:17 -07:00
Guillaume J. Charmes
c05c91ca3b Make kernel detection work without suffix 2013-04-22 18:15:33 -07:00
Guillaume J. Charmes
b76d63cb0c Forbid attach to ghost 2013-04-22 17:53:32 -07:00
Guillaume J. Charmes
f926ed182f Allow to kill/stop ghosts 2013-04-22 17:53:32 -07:00
Guillaume J. Charmes
d440782e17 Allow to kill container after docker server restarts 2013-04-22 17:52:38 -07:00
Guillaume J. Charmes
82848d4158 Allow to wait on container even after docker server restarts using lxc-info 2013-04-22 17:52:38 -07:00
Guillaume J. Charmes
97535e5a64 Add unit test for file deletion 2013-04-22 17:51:09 -07:00
Guillaume J. Charmes
f079fbe3fa Check that the pid in pidfile exists before preventing docker from starting 2013-04-22 15:57:31 -07:00
Guillaume J. Charmes
90d144b612 Merge pull request #457 from shamrin/patch-1
README.md: `docker port` instead of just `port`
2013-04-22 15:40:03 -07:00
Guillaume J. Charmes
d3db94696d Merge pull request #461 from neomantra/master
Fix typo (ghot -> ghost)
2013-04-22 15:39:34 -07:00
Evan Wies
ffe16e3224 Fix typo (ghot -> ghost) 2013-04-22 18:37:06 -04:00
Guillaume J. Charmes
0b60829df7 Add initial changelog 2013-04-22 15:26:06 -07:00
Thatcher Peskens
690e118670 Updated gettingstarted with quicker install. 2013-04-22 13:36:00 -07:00
Alexey Shamrin
038e1d174b README.md: docker port instead of just port 2013-04-23 00:27:23 +04:00
Thatcher Peskens
6c8dcd5cbb Updated Vagrantfile and documentation to reflect new installation path using Ubuntu's PPA, also switched everything to use Ubuntu 12.04 by default. 2013-04-22 13:10:32 -07:00
Guillaume J. Charmes
16aeb77d51 Move the kernel detection to arch specific files 2013-04-22 12:08:59 -07:00
Guillaume J. Charmes
4ac3b803b9 Make the kernel version detection more generic 2013-04-22 11:39:56 -07:00
Guillaume J. Charmes
3514e47edc Do not prevent docker from running when kernel detection fails 2013-04-22 11:26:34 -07:00
Guillaume J. Charmes
acb546cd1b Fix race within TestRunDisconnectTty 2013-04-22 11:16:32 -07:00
Guillaume J. Charmes
2ced94b414 Merge pull request #454 from tianon/master
Update utils.go to not enforce extra constraints on the kernel "flavor" (such as being integral or even comparable one to another)
2013-04-22 08:02:42 -07:00
Guillaume J. Charmes
71b5806614 Do not stop execution if cgroup mountpoint is not found 2013-04-22 00:44:57 -04:00
Tianon Gravi
1f65c6bf4c Update utils.go to not enforce extra constraints on the kernel "flavor" (such as being integral or even comparable one to another)
This is especially to fix the current docker on kernels such as gentoo-sources, where the "flavor" is the string "gentoo", and that obviously fails to be converted to an integer.
2013-04-21 19:19:38 -06:00
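
The lenient comparison this implies compares only the numeric kernel, major and minor components and carries the flavor along without ever comparing it. A hypothetical illustration; docker's actual CompareKernelVersion, referenced elsewhere in this commit range, may differ in detail:

```go
package main

import "fmt"

// KernelVersionInfo holds the numeric parts of a kernel version plus a
// flavor string (e.g. "generic", "gentoo") that is informational only.
type KernelVersionInfo struct {
	Kernel, Major, Minor int
	Flavor               string // deliberately never compared
}

// compareKernelVersion returns -1, 0 or 1. A sketch of the comparison the
// commit describes, not docker's exact code.
func compareKernelVersion(a, b KernelVersionInfo) int {
	for _, pair := range [][2]int{{a.Kernel, b.Kernel}, {a.Major, b.Major}, {a.Minor, b.Minor}} {
		if pair[0] < pair[1] {
			return -1
		}
		if pair[0] > pair[1] {
			return 1
		}
	}
	return 0
}

func main() {
	host := KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "gentoo"}
	min := KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "generic"}
	fmt.Println(compareKernelVersion(host, min) >= 0) // true: flavor is ignored
}
```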
Solomon Hykes
965e8a02d2 'docker push' shows an additional progress bar while it buffers the archive to disk. Fixes #451. 2013-04-21 15:29:26 -07:00
Solomon Hykes
baacae8345 'docker push' buffers filesystem archives on disk instead of memory. 2013-04-21 14:23:55 -07:00
Solomon Hykes
52cedb8a05 Better title in ubuntu install doc 2013-04-20 18:26:15 -07:00
Solomon Hykes
15c7e72e2a Merge branch 'master' of ssh://github.com/dotcloud/docker 2013-04-20 18:19:04 -07:00
Solomon Hykes
76b40ad6c9 Merge remote-tracking branch 'origin/check_kernel_capabilities' 2013-04-20 17:40:25 -07:00
Solomon Hykes
6909f3911f Merge pull request #422 from shawnsi/arch-docs
Arch docs
2013-04-20 17:35:43 -07:00
Thatcher Peskens
0731d1a582 Updated ubuntu install 2013-04-19 20:59:43 -07:00
Thatcher Peskens
8ecde8f9a5 Updated documentation and fixed Vagrantfile 2013-04-19 20:57:50 -07:00
Guillaume J. Charmes
e49af5b6de Use aufs to handle parents whiteouts instead of doing it manually 2013-04-19 16:33:25 -07:00
Guillaume J. Charmes
f3e89fae28 Use mount to determine the cgroup mountpoint 2013-04-18 21:57:58 -07:00
Guillaume J. Charmes
c42a4179fc Add unit tests for CompareKernelVersion 2013-04-18 21:34:34 -07:00
Guillaume J. Charmes
2d32ac8cff Improve the docker version output 2013-04-18 21:08:33 -07:00
Guillaume J. Charmes
f68d107a13 Remove the NO_MEMORY_LIMIT constant 2013-04-18 21:08:20 -07:00
Guillaume J. Charmes
640efc2ed2 Add capabilities check to allow docker to run on kernel that does not have all options 2013-04-18 20:55:41 -07:00
Guillaume J. Charmes
003622c8b6 Check kernel version and display warning if too low 2013-04-18 20:47:24 -07:00
Thatcher Peskens
6de5ca1e64 Added redirect from old location of documentation (/documentation); this was the location when we were on GitHub. 2013-04-18 16:00:18 -07:00
Shawn Siefkas
7eda9c64b8 Updating the arch linux installation docs
New AUR package name
Adding systemd service unit info
2013-04-18 09:20:23 -05:00
Shawn Siefkas
84c13a3dcf Adding archlinux packaging documentation 2013-04-18 09:17:31 -05:00
70 changed files with 2315 additions and 543 deletions

CHANGELOG.md (new file, +103 lines)

@@ -0,0 +1,103 @@
# Changelog
## 0.2.2 (2013-05-03)
+ Support for data volumes ('docker run -v=PATH')
+ Share data volumes between containers ('docker run -volumes-from')
+ Improved documentation
* Upgrade to Go 1.0.3
* Various upgrades to the dev environment for contributors
## 0.2.1 (2013-05-01)
+ 'docker commit -run' bundles a layer with default runtime options: command, ports etc.
* Improve install process on Vagrant
+ New Dockerfile operation: "maintainer"
+ New Dockerfile operation: "expose"
+ New Dockerfile operation: "cmd"
+ Contrib script to build a Debian base layer
+ 'docker -d -r': restart crashed containers at daemon startup
* Runtime: improve test coverage
## 0.2.0 (2013-04-23)
- Runtime: ghost containers can be killed and waited for
* Documentation: update install instructions
- Packaging: fix Vagrantfile
- Development: automate releasing binaries and ubuntu packages
+ Add a changelog
- Various bugfixes
## 0.1.8 (2013-04-22)
- Dynamically detect cgroup capabilities
- Issue stability warning on kernels <3.8
- 'docker push' buffers on disk instead of memory
- Fix 'docker diff' for removed files
- Fix 'docker stop' for ghost containers
- Fix handling of pidfile
- Various bugfixes and stability improvements
## 0.1.7 (2013-04-18)
- Container ports are available on localhost
- 'docker ps' shows allocated TCP ports
- Contributors can run 'make hack' to start a continuous integration VM
- Streamline ubuntu packaging & uploading
- Various bugfixes and stability improvements
## 0.1.6 (2013-04-17)
- Record the author of an image with 'docker commit -author'
## 0.1.5 (2013-04-17)
- Disable standalone mode
- Use a custom DNS resolver with 'docker -d -dns'
- Detect ghost containers
- Improve diagnosis of missing system capabilities
- Allow disabling memory limits at compile time
- Add debian packaging
- Documentation: installing on Arch Linux
- Documentation: running Redis on docker
- Fixed lxc 0.9 compatibility
- Automatically load aufs module
- Various bugfixes and stability improvements
## 0.1.4 (2013-04-09)
- Full support for TTY emulation
- Detach from a TTY session with the escape sequence `C-p C-q`
- Various bugfixes and stability improvements
- Minor UI improvements
- Automatically create our own bridge interface 'docker0'
## 0.1.3 (2013-04-04)
- Choose TCP frontend port with '-p :PORT'
- Layer format is versioned
- Major reliability improvements to the process manager
- Various bugfixes and stability improvements
## 0.1.2 (2013-04-03)
- Set container hostname with 'docker run -h'
- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]'
- Various bugfixes and stability improvements
- UI polish
- Progress bar on push/pull
- Use XZ compression by default
- Make IP allocator lazy
## 0.1.1 (2013-03-31)
- Display shorthand IDs for convenience
- Stabilize process management
- Layers can include a commit message
- Simplified 'docker attach'
- Fixed support for re-attaching
- Various bugfixes and stability improvements
- Auto-download at run
- Auto-login on push
- Beefed up documentation
## 0.1.0 (2013-03-23)
- First release
- Implement registry in order to push/pull images
- TCP port allocation
- Fix termcaps on Linux
- Add documentation
- Add Vagrant support with Vagrantfile
- Add unit tests
- Add repository/tags to ease image management
- Improve the layer implementation


@@ -1,5 +1,9 @@
DOCKER_PACKAGE := github.com/dotcloud/docker
RELEASE_VERSION := $(shell git tag | grep -E "v[0-9\.]+$$" | sort -nr | head -n 1)
SRCRELEASE := docker-$(RELEASE_VERSION)
BINRELEASE := docker-$(RELEASE_VERSION).tgz
GIT_ROOT := $(shell git rev-parse --show-toplevel)
BUILD_DIR := $(CURDIR)/.gopath
GOPATH ?= $(BUILD_DIR)
@@ -13,10 +17,7 @@ endif
GIT_COMMIT = $(shell git rev-parse --short HEAD)
GIT_STATUS = $(shell test -n "`git status --porcelain`" && echo "+CHANGES")
NO_MEMORY_LIMIT ?= 0
export NO_MEMORY_LIMIT
BUILD_OPTIONS = -ldflags "-X main.GIT_COMMIT $(GIT_COMMIT)$(GIT_STATUS) -X main.NO_MEMORY_LIMIT $(NO_MEMORY_LIMIT)"
BUILD_OPTIONS = -ldflags "-X main.GIT_COMMIT $(GIT_COMMIT)$(GIT_STATUS)"
SRC_DIR := $(GOPATH)/src
@@ -26,18 +27,39 @@ DOCKER_MAIN := $(DOCKER_DIR)/docker
DOCKER_BIN_RELATIVE := bin/docker
DOCKER_BIN := $(CURDIR)/$(DOCKER_BIN_RELATIVE)
.PHONY: all clean test hack
.PHONY: all clean test hack release srcrelease $(BINRELEASE) $(SRCRELEASE) $(DOCKER_BIN) $(DOCKER_DIR)
all: $(DOCKER_BIN)
$(DOCKER_BIN): $(DOCKER_DIR)
@mkdir -p $(dir $@)
@(cd $(DOCKER_MAIN); go get $(GO_OPTIONS); go build $(GO_OPTIONS) $(BUILD_OPTIONS) -o $@)
@(cd $(DOCKER_MAIN); go build $(GO_OPTIONS) $(BUILD_OPTIONS) -o $@)
@echo $(DOCKER_BIN_RELATIVE) is created.
$(DOCKER_DIR):
@mkdir -p $(dir $@)
@ln -sf $(CURDIR)/ $@
@if [ -h $@ ]; then rm -f $@; fi; ln -sf $(CURDIR)/ $@
@(cd $(DOCKER_MAIN); go get $(GO_OPTIONS))
whichrelease:
echo $(RELEASE_VERSION)
release: $(BINRELEASE)
s3cmd -P put $(BINRELEASE) s3://get.docker.io/builds/`uname -s`/`uname -m`/docker-$(RELEASE_VERSION).tgz
srcrelease: $(SRCRELEASE)
deps: $(DOCKER_DIR)
# A clean checkout of $RELEASE_VERSION, with vendored dependencies
$(SRCRELEASE):
rm -fr $(SRCRELEASE)
git clone $(GIT_ROOT) $(SRCRELEASE)
cd $(SRCRELEASE); git checkout -q $(RELEASE_VERSION)
# A binary release ready to be uploaded to a mirror
$(BINRELEASE): $(SRCRELEASE)
rm -f $(BINRELEASE)
cd $(SRCRELEASE); make; cp -R bin docker-$(RELEASE_VERSION); tar -f ../$(BINRELEASE) -zv -c docker-$(RELEASE_VERSION)
clean:
@rm -rf $(dir $(DOCKER_BIN))
@@ -54,4 +76,7 @@ fmt:
@gofmt -s -l -w .
hack:
cd $(CURDIR)/buildbot && vagrant up
cd $(CURDIR)/hack && vagrant up
ssh-dev:
cd $(CURDIR)/hack && vagrant ssh


@@ -125,7 +125,7 @@ Running an irc bouncer
```bash
BOUNCER_ID=$(docker run -d -p 6667 -u irc shykes/znc $USER $PASSWORD)
echo "Configure your irc client to connect to port $(port $BOUNCER_ID 6667) of this machine"
echo "Configure your irc client to connect to port $(docker port $BOUNCER_ID 6667) of this machine"
```
Running Redis
@@ -133,7 +133,7 @@ Running Redis
```bash
REDIS_ID=$(docker run -d -p 6379 shykes/redis redis-server)
echo "Configure your redis client to connect to port $(port $REDIS_ID 6379) of this machine"
echo "Configure your redis client to connect to port $(docker port $REDIS_ID 6379) of this machine"
```
Share your own image!

Vagrantfile (vendored, 48 lines changed)

@@ -1,41 +1,37 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
def v10(config)
config.vm.box = "quantal64_3.5.0-25"
config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box"
BOX_NAME = "ubuntu"
BOX_URI = "http://files.vagrantup.com/precise64.box"
PPA_KEY = "E61D797F63561DC6"
config.vm.share_folder "v-data", "/opt/go/src/github.com/dotcloud/docker", File.dirname(__FILE__)
# Ensure puppet is installed on the instance
config.vm.provision :shell, :inline => "apt-get -qq update; apt-get install -y puppet"
config.vm.provision :puppet do |puppet|
puppet.manifests_path = "puppet/manifests"
puppet.manifest_file = "quantal64.pp"
puppet.module_path = "puppet/modules"
Vagrant::Config.run do |config|
# Setup virtual machine box. This VM configuration code is always executed.
config.vm.box = BOX_NAME
config.vm.box_url = BOX_URI
# Add docker PPA key to the local repository and install docker
pkg_cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys #{PPA_KEY}; "
pkg_cmd << "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >/etc/apt/sources.list.d/lxc-docker.list; "
pkg_cmd << "apt-get update -qq; apt-get install -q -y lxc-docker"
if ARGV.include?("--provider=aws".downcase)
# Add AUFS dependency to amazon's VM
pkg_cmd << "; apt-get install linux-image-extra-3.2.0-40-virtual"
end
config.vm.provision :shell, :inline => pkg_cmd
end
Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config|
v10(config)
end
Vagrant::VERSION >= "1.1.0" and Vagrant.configure("1") do |config|
v10(config)
end
# Providers were added on Vagrant >= 1.1.0
Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
config.vm.provider :aws do |aws|
config.vm.provider :aws do |aws, override|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
aws.ssh_private_key_path = ENV["AWS_SSH_PRIVKEY"]
override.ssh.private_key_path = ENV["AWS_SSH_PRIVKEY"]
override.ssh.username = "ubuntu"
aws.region = "us-east-1"
aws.ami = "ami-ae9806c7"
aws.ssh_username = "ubuntu"
aws.ami = "ami-d0f89fb9"
aws.instance_type = "t1.micro"
end
@@ -51,7 +47,7 @@ Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
end
config.vm.provider :virtualbox do |vb|
config.vm.box = "quantal64_3.5.0-25"
config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box"
config.vm.box = BOX_NAME
config.vm.box_url = BOX_URI
end
end


@@ -4,6 +4,7 @@ import (
"errors"
"io"
"io/ioutil"
"os"
"os/exec"
)
@@ -86,3 +87,38 @@ func CmdStream(cmd *exec.Cmd) (io.Reader, error) {
}
return pipeR, nil
}
// NewTempArchive reads the content of src into a temporary file, and returns the contents
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
f, err := ioutil.TempFile(dir, "")
if err != nil {
return nil, err
}
if _, err := io.Copy(f, src); err != nil {
return nil, err
}
if _, err := f.Seek(0, 0); err != nil {
return nil, err
}
st, err := f.Stat()
if err != nil {
return nil, err
}
size := st.Size()
return &TempArchive{f, size}, nil
}
type TempArchive struct {
*os.File
Size int64 // Pre-computed from Stat().Size() as a convenience
}
func (archive *TempArchive) Read(data []byte) (int, error) {
n, err := archive.File.Read(data)
if err != nil {
os.Remove(archive.File.Name())
}
return n, err
}
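
A hypothetical caller for the read-once semantics added above, assuming it lives in the same package as NewTempArchive (it needs the io and log imports; Archive is the package's archive reader type). io.Copy drives Read to io.EOF, at which point TempArchive.Read removes the backing temp file, so the archive can be streamed out exactly once:

```go
// streamOnce buffers src to disk, reports its size, then streams it out.
// Hypothetical helper, not part of the diff.
func streamOnce(src Archive, w io.Writer) error {
	tmp, err := NewTempArchive(src, "") // "" lets ioutil.TempFile pick the default dir
	if err != nil {
		return err
	}
	log.Printf("uploading %d bytes", tmp.Size) // Size was pre-computed from Stat()
	// Any Read error, including io.EOF at end of stream, deletes the temp file.
	_, err = io.Copy(w, tmp)
	return err
}
```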


@@ -10,6 +10,7 @@ import (
"log"
"net/http"
"net/url"
"path/filepath"
"runtime"
"strconv"
"strings"
@@ -18,11 +19,10 @@ import (
"unicode"
)
const VERSION = "0.1.7"
const VERSION = "0.2.2"
var (
GIT_COMMIT string
NO_MEMORY_LIMIT bool
GIT_COMMIT string
)
func (srv *Server) Name() string {
@@ -184,10 +184,14 @@ func (srv *Server) CmdWait(stdin io.ReadCloser, stdout io.Writer, args ...string
// 'docker version': show version information
func (srv *Server) CmdVersion(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
fmt.Fprintf(stdout, "Version:%s\n", VERSION)
fmt.Fprintf(stdout, "Git Commit:%s\n", GIT_COMMIT)
if NO_MEMORY_LIMIT {
fmt.Fprintf(stdout, "Memory limit disabled\n")
fmt.Fprintf(stdout, "Version: %s\n", VERSION)
fmt.Fprintf(stdout, "Git Commit: %s\n", GIT_COMMIT)
fmt.Fprintf(stdout, "Kernel: %s\n", srv.runtime.kernelVersion)
if !srv.runtime.capabilities.MemoryLimit {
fmt.Fprintf(stdout, "WARNING: No memory limit support\n")
}
if !srv.runtime.capabilities.SwapLimit {
fmt.Fprintf(stdout, "WARNING: No swap limit support\n")
}
return nil
}
@@ -397,7 +401,8 @@ func (srv *Server) CmdHistory(stdin io.ReadCloser, stdout io.Writer, args ...str
}
func (srv *Server) CmdRm(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
cmd := rcli.Subcmd(stdout, "rm", "CONTAINER [CONTAINER...]", "Remove a container")
cmd := rcli.Subcmd(stdout, "rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove a container")
v := cmd.Bool("v", false, "Remove the volumes associated to the container")
if err := cmd.Parse(args); err != nil {
return nil
}
@@ -405,15 +410,40 @@ func (srv *Server) CmdRm(stdin io.ReadCloser, stdout io.Writer, args ...string)
cmd.Usage()
return nil
}
volumes := make(map[string]struct{})
for _, name := range cmd.Args() {
container := srv.runtime.Get(name)
if container == nil {
return fmt.Errorf("No such container: %s", name)
}
// Store all the deleted containers volumes
for _, volumeId := range container.Volumes {
volumes[volumeId] = struct{}{}
}
if err := srv.runtime.Destroy(container); err != nil {
fmt.Fprintln(stdout, "Error destroying container "+name+": "+err.Error())
}
}
if *v {
// Retrieve all volumes from all remaining containers
usedVolumes := make(map[string]*Container)
for _, container := range srv.runtime.List() {
for _, containerVolumeId := range container.Volumes {
usedVolumes[containerVolumeId] = container
}
}
for volumeId := range volumes {
// If the requested volume is still used by another container, skip it
if c, exists := usedVolumes[volumeId]; exists {
fmt.Fprintf(stdout, "The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.Id)
continue
}
if err := srv.runtime.volumes.Delete(volumeId); err != nil {
return err
}
}
}
return nil
}
@@ -472,9 +502,9 @@ func (srv *Server) CmdImport(stdin io.ReadCloser, stdout rcli.DockerConn, args .
if err != nil {
return err
}
archive = ProgressReader(resp.Body, int(resp.ContentLength), stdout)
archive = ProgressReader(resp.Body, int(resp.ContentLength), stdout, "Importing %v/%v (%v)")
}
img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "")
img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "", nil)
if err != nil {
return err
}
@@ -723,6 +753,7 @@ func (srv *Server) CmdCommit(stdin io.ReadCloser, stdout io.Writer, args ...stri
"Create a new image from a container's changes")
flComment := cmd.String("m", "", "Commit message")
flAuthor := cmd.String("author", "", "Author (eg. \"John Hannibal Smith <hannibal@a-team.com>\"")
flConfig := cmd.String("run", "", "Config automatically applied when the image is run. "+`(ex: {"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`)
if err := cmd.Parse(args); err != nil {
return nil
}
@@ -731,7 +762,16 @@ func (srv *Server) CmdCommit(stdin io.ReadCloser, stdout io.Writer, args ...stri
cmd.Usage()
return nil
}
img, err := srv.runtime.Commit(containerName, repository, tag, *flComment, *flAuthor)
var config *Config
if *flConfig != "" {
config = &Config{}
if err := json.Unmarshal([]byte(*flConfig), config); err != nil {
return err
}
}
img, err := srv.runtime.Commit(containerName, repository, tag, *flComment, *flAuthor, config)
if err != nil {
return err
}
@@ -833,6 +873,10 @@ func (srv *Server) CmdAttach(stdin io.ReadCloser, stdout rcli.DockerConn, args .
return fmt.Errorf("No such container: %s", name)
}
if container.State.Ghost {
return fmt.Errorf("Impossible to attach to a ghost container")
}
if container.Config.Tty {
stdout.SetOptionRawTerminal()
}
@@ -896,6 +940,25 @@ func (opts AttachOpts) Get(val string) bool {
return false
}
// PathOpts stores a unique set of absolute paths
type PathOpts map[string]struct{}
func NewPathOpts() PathOpts {
return make(PathOpts)
}
func (opts PathOpts) String() string {
return fmt.Sprintf("%v", map[string]struct{}(opts))
}
func (opts PathOpts) Set(val string) error {
if !filepath.IsAbs(val) {
return fmt.Errorf("%s is not an absolute path", val)
}
opts[filepath.Clean(val)] = struct{}{}
return nil
}
func (srv *Server) CmdTag(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
cmd := rcli.Subcmd(stdout, "tag", "[OPTIONS] IMAGE REPOSITORY [TAG]", "Tag an image into a repository")
force := cmd.Bool("f", false, "Force")
@@ -910,7 +973,7 @@ func (srv *Server) CmdTag(stdin io.ReadCloser, stdout io.Writer, args ...string)
}
func (srv *Server) CmdRun(stdin io.ReadCloser, stdout rcli.DockerConn, args ...string) error {
config, err := ParseRun(args, stdout)
config, err := ParseRun(args, stdout, srv.runtime.capabilities)
if err != nil {
return err
}
@@ -918,10 +981,6 @@ func (srv *Server) CmdRun(stdin io.ReadCloser, stdout rcli.DockerConn, args ...s
fmt.Fprintln(stdout, "Error: Image not specified")
return fmt.Errorf("Image not specified")
}
if len(config.Cmd) == 0 {
fmt.Fprintln(stdout, "Error: Command not specified")
return fmt.Errorf("Command not specified")
}
if config.Tty {
stdout.SetOptionRawTerminal()
@@ -976,16 +1035,21 @@ func (srv *Server) CmdRun(stdin io.ReadCloser, stdout rcli.DockerConn, args ...s
}
Debugf("Waiting for attach to return\n")
<-attachErr
container.Wait()
// Expecting I/O pipe error, discarding
// If we are in stdinonce mode, wait for the process to end
// otherwise, simply return
if config.StdinOnce && !config.Tty {
container.Wait()
}
return nil
}
func NewServer() (*Server, error) {
func NewServer(autoRestart bool) (*Server, error) {
if runtime.GOARCH != "amd64" {
log.Fatalf("The docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH)
}
runtime, err := NewRuntime()
runtime, err := NewRuntime(autoRestart)
if err != nil {
return nil, err
}


@@ -228,6 +228,21 @@ func TestRunDisconnectTty(t *testing.T) {
close(c1)
}()
setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
for {
// Client disconnect after run -i should keep stdin out in TTY mode
l := runtime.List()
if len(l) == 1 && l[0].State.Running {
break
}
time.Sleep(10 * time.Millisecond)
}
})
// Client disconnect after run -i should keep stdin out in TTY mode
container := runtime.List()[0]
setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
t.Fatal(err)
@@ -242,8 +257,6 @@ func TestRunDisconnectTty(t *testing.T) {
// In tty mode, we expect the process to stay alive even after client's stdin closes.
// Do not wait for run to finish
// Client disconnect after run -i should keep stdin out in TTY mode
container := runtime.List()[0]
// Give some time to monitor to do his thing
container.WaitTimeout(500 * time.Millisecond)
if !container.State.Running {
@@ -381,4 +394,5 @@ func TestAttachDisconnect(t *testing.T) {
// Try to avoid the timeout in destroy. Best effort, don't check error
cStdin, _ := container.StdinPipe()
cStdin.Close()
container.Wait()
}


@@ -48,6 +48,7 @@ type Container struct {
runtime *Runtime
waitLock chan struct{}
Volumes map[string]string
}
type Config struct {
@@ -66,9 +67,11 @@ type Config struct {
Cmd []string
Dns []string
Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
Volumes map[string]struct{}
VolumesFrom string
}
func ParseRun(args []string, stdout io.Writer) (*Config, error) {
func ParseRun(args []string, stdout io.Writer, capabilities *Capabilities) (*Config, error) {
cmd := rcli.Subcmd(stdout, "run", "[OPTIONS] IMAGE COMMAND [ARG...]", "Run a command in a new container")
if len(args) > 0 && args[0] != "--help" {
cmd.SetOutput(ioutil.Discard)
@@ -83,8 +86,8 @@ func ParseRun(args []string, stdout io.Writer) (*Config, error) {
flTty := cmd.Bool("t", false, "Allocate a pseudo-tty")
flMemory := cmd.Int64("m", 0, "Memory limit (in bytes)")
if *flMemory > 0 && NO_MEMORY_LIMIT {
fmt.Fprintf(stdout, "WARNING: This version of docker has been compiled without memory limit support. Discarding -m.")
if *flMemory > 0 && !capabilities.MemoryLimit {
fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
*flMemory = 0
}
@@ -97,6 +100,11 @@ func ParseRun(args []string, stdout io.Writer) (*Config, error) {
var flDns ListOpts
cmd.Var(&flDns, "dns", "Set custom dns servers")
flVolumes := NewPathOpts()
cmd.Var(flVolumes, "v", "Attach a data volume")
flVolumesFrom := cmd.String("volumes-from", "", "Mount volumes from the specified container")
if err := cmd.Parse(args); err != nil {
return nil, err
}
@@ -136,7 +144,15 @@ func ParseRun(args []string, stdout io.Writer) (*Config, error) {
Cmd: runCmd,
Dns: flDns,
Image: image,
Volumes: flVolumes,
VolumesFrom: *flVolumesFrom,
}
if *flMemory > 0 && !capabilities.SwapLimit {
fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
config.MemorySwap = -1
}
// When allocating stdin in attached mode, close stdin at client disconnect
if config.OpenStdin && config.AttachStdin {
config.StdinOnce = true
@@ -379,14 +395,49 @@ func (container *Container) Start() error {
return err
}
if container.Config.Memory > 0 && NO_MEMORY_LIMIT {
log.Printf("WARNING: This version of docker has been compiled without memory limit support. Discarding the limit.")
// Make sure the config is compatible with the current kernel
if container.Config.Memory > 0 && !container.runtime.capabilities.MemoryLimit {
log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
container.Config.Memory = 0
}
if container.Config.Memory > 0 && !container.runtime.capabilities.SwapLimit {
log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
container.Config.MemorySwap = -1
}
container.Volumes = make(map[string]string)
// Create the requested volumes
for volPath := range container.Config.Volumes {
if c, err := container.runtime.volumes.Create(nil, container, "", "", nil); err != nil {
return err
} else {
if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
return nil
}
container.Volumes[volPath] = c.Id
}
}
if container.Config.VolumesFrom != "" {
c := container.runtime.Get(container.Config.VolumesFrom)
if c == nil {
return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.Id)
}
for volPath, id := range c.Volumes {
if _, exists := container.Volumes[volPath]; exists {
return fmt.Errorf("The requested volume %s overlap one of the volume of the container %s", volPath, c.Id)
}
if err := os.MkdirAll(path.Join(container.RootfsPath(), volPath), 0755); err != nil {
return nil
}
container.Volumes[volPath] = id
}
}
if err := container.generateLXCConfig(); err != nil {
return err
}
params := []string{
"-n", container.Id,
"-f", container.lxcConfigPath(),
@@ -445,6 +496,7 @@ func (container *Container) Start() error {
// Init the lock
container.waitLock = make(chan struct{})
container.ToDisk()
go container.monitor()
return nil
@@ -519,16 +571,42 @@ func (container *Container) releaseNetwork() {
container.NetworkSettings = &NetworkSettings{}
}
// FIXME: replace this with a control socket within docker-init
func (container *Container) waitLxc() error {
for {
if output, err := exec.Command("lxc-info", "-n", container.Id).CombinedOutput(); err != nil {
return err
} else {
if !strings.Contains(string(output), "RUNNING") {
return nil
}
}
time.Sleep(500 * time.Millisecond)
}
return nil
}
func (container *Container) monitor() {
// Wait for the program to exit
Debugf("Waiting for process")
if err := container.cmd.Wait(); err != nil {
// Discard the error as any signals or non 0 returns will generate an error
Debugf("%s: Process: %s", container.Id, err)
// If the command does not exists, try to wait via lxc
if container.cmd == nil {
if err := container.waitLxc(); err != nil {
Debugf("%s: Process: %s", container.Id, err)
}
} else {
if err := container.cmd.Wait(); err != nil {
// Discard the error as any signals or non 0 returns will generate an error
Debugf("%s: Process: %s", container.Id, err)
}
}
Debugf("Process finished")
exitCode := container.cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
var exitCode int = -1
if container.cmd != nil {
exitCode = container.cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
}
// Cleanup
container.releaseNetwork()
@@ -577,7 +655,7 @@ func (container *Container) monitor() {
}
func (container *Container) kill() error {
if !container.State.Running || container.cmd == nil {
if !container.State.Running {
return nil
}
@@ -589,6 +667,9 @@ func (container *Container) kill() error {
// 2. Wait for the process to die, in last resort, try to kill the process directly
if err := container.WaitTimeout(10 * time.Second); err != nil {
if container.cmd == nil {
return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", container.Id)
}
log.Printf("Container %s failed to exit within 10 seconds of lxc SIGKILL - trying direct SIGKILL", container.Id)
if err := container.cmd.Process.Kill(); err != nil {
return err
@@ -606,9 +687,6 @@ func (container *Container) Kill() error {
if !container.State.Running {
return nil
}
if container.State.Ghost {
return fmt.Errorf("Can't kill ghost container")
}
return container.kill()
}
@@ -618,9 +696,6 @@ func (container *Container) Stop(seconds int) error {
if !container.State.Running {
return nil
}
if container.State.Ghost {
return fmt.Errorf("Can't stop ghot container")
}
// 1. Send a SIGTERM
if output, err := exec.Command("lxc-kill", "-n", container.Id, "15").CombinedOutput(); err != nil {
@@ -753,6 +828,22 @@ func (container *Container) RootfsPath() string {
return path.Join(container.root, "rootfs")
}
func (container *Container) GetVolumes() (map[string]string, error) {
ret := make(map[string]string)
for volPath, id := range container.Volumes {
volume, err := container.runtime.volumes.Get(id)
if err != nil {
return nil, err
}
root, err := volume.root()
if err != nil {
return nil, err
}
ret[volPath] = path.Join(root, "layer")
}
return ret, nil
}
func (container *Container) rwPath() string {
return path.Join(container.root, "rw")
}


@@ -22,9 +22,8 @@ func TestIdFormat(t *testing.T) {
defer nuke(runtime)
container1, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/sh", "-c", "echo hello world"},
Memory: 33554432,
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/sh", "-c", "echo hello world"},
},
)
if err != nil {
@@ -50,7 +49,6 @@ func TestMultipleAttachRestart(t *testing.T) {
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/sh", "-c",
"i=1; while [ $i -le 5 ]; do i=`expr $i + 1`; echo hello; done"},
Memory: 33554432,
},
)
if err != nil {
@@ -116,8 +114,8 @@ func TestMultipleAttachRestart(t *testing.T) {
if err := container.Start(); err != nil {
t.Fatal(err)
}
timeout := make(chan bool)
go func() {
setTimeout(t, "Timeout reading from the process", 3*time.Second, func() {
l1, err = bufio.NewReader(stdout1).ReadString('\n')
if err != nil {
t.Fatal(err)
@@ -139,18 +137,87 @@ func TestMultipleAttachRestart(t *testing.T) {
if strings.Trim(l3, " \r\n") != "hello" {
t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l3)
}
timeout <- false
}()
go func() {
time.Sleep(3 * time.Second)
timeout <- true
}()
if <-timeout {
t.Fatalf("Timeout reading from the process")
})
container.Wait()
}
func TestDiff(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
// Create a container and remove a file
container1, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/rm", "/etc/passwd"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container1)
if err := container1.Run(); err != nil {
t.Fatal(err)
}
// Check the changelog
c, err := container1.Changes()
if err != nil {
t.Fatal(err)
}
success := false
for _, elem := range c {
if elem.Path == "/etc/passwd" && elem.Kind == 2 {
success = true
}
}
if !success {
t.Fatalf("/etc/passwd as been removed but is not present in the diff")
}
// Commit the container
rwTar, err := container1.ExportRw()
if err != nil {
t.Error(err)
}
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image - diff", "", nil)
if err != nil {
t.Error(err)
}
// Create a new container from the commited image
container2, err := runtime.Create(
&Config{
Image: img.Id,
Cmd: []string{"cat", "/etc/passwd"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container2)
if err := container2.Run(); err != nil {
t.Fatal(err)
}
// Check the changelog
c, err = container2.Changes()
if err != nil {
t.Fatal(err)
}
for _, elem := range c {
if elem.Path == "/etc/passwd" {
t.Fatalf("/etc/passwd should not be present in the diff after commit.")
}
}
}
func TestCommitRun(t *testing.T) {
func TestCommitAutoRun(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
@@ -158,9 +225,8 @@ func TestCommitRun(t *testing.T) {
defer nuke(runtime)
container1, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/sh", "-c", "echo hello > /world"},
Memory: 33554432,
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/sh", "-c", "echo hello > /world"},
},
)
if err != nil {
@@ -182,7 +248,7 @@ func TestCommitRun(t *testing.T) {
if err != nil {
t.Error(err)
}
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "")
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "", &Config{Cmd: []string{"cat", "/world"}})
if err != nil {
t.Error(err)
}
@@ -191,9 +257,86 @@ func TestCommitRun(t *testing.T) {
container2, err := runtime.Create(
&Config{
Image: img.Id,
Memory: 33554432,
Cmd: []string{"cat", "/world"},
Image: img.Id,
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container2)
stdout, err := container2.StdoutPipe()
if err != nil {
t.Fatal(err)
}
stderr, err := container2.StderrPipe()
if err != nil {
t.Fatal(err)
}
if err := container2.Start(); err != nil {
t.Fatal(err)
}
container2.Wait()
output, err := ioutil.ReadAll(stdout)
if err != nil {
t.Fatal(err)
}
output2, err := ioutil.ReadAll(stderr)
if err != nil {
t.Fatal(err)
}
if err := stdout.Close(); err != nil {
t.Fatal(err)
}
if err := stderr.Close(); err != nil {
t.Fatal(err)
}
if string(output) != "hello\n" {
t.Fatalf("Unexpected output. Expected %s, received: %s (err: %s)", "hello\n", output, output2)
}
}
func TestCommitRun(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
container1, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/sh", "-c", "echo hello > /world"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container1)
if container1.State.Running {
t.Errorf("Container shouldn't be running")
}
if err := container1.Run(); err != nil {
t.Fatal(err)
}
if container1.State.Running {
t.Errorf("Container shouldn't be running")
}
rwTar, err := container1.ExportRw()
if err != nil {
t.Error(err)
}
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "", nil)
if err != nil {
t.Error(err)
}
// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
container2, err := runtime.Create(
&Config{
Image: img.Id,
Cmd: []string{"cat", "/world"},
},
)
if err != nil {
@@ -278,9 +421,8 @@ func TestRun(t *testing.T) {
defer nuke(runtime)
container, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).Id,
Memory: 33554432,
Cmd: []string{"ls", "-al"},
Image: GetTestImage(runtime).Id,
Cmd: []string{"ls", "-al"},
},
)
if err != nil {


@@ -1,17 +1,22 @@
package main
import (
"fmt"
"io"
"log"
"net"
"os"
"os/exec"
"path"
"time"
)
const DOCKER_PATH = "/home/creack/dotcloud/docker/docker/docker"
var DOCKER_PATH string = path.Join(os.Getenv("DOCKERPATH"), "docker")
// WARNING: this crashTest will 1) crash your host, 2) remove all containers
func runDaemon() (*exec.Cmd, error) {
os.Remove("/var/run/docker.pid")
exec.Command("rm", "-rf", "/var/lib/docker/containers").Run()
cmd := exec.Command(DOCKER_PATH, "-d")
outPipe, err := cmd.StdoutPipe()
if err != nil {
@@ -38,19 +43,43 @@ func crashTest() error {
return err
}
var endpoint string
if ep := os.Getenv("TEST_ENDPOINT"); ep == "" {
endpoint = "192.168.56.1:7979"
} else {
endpoint = ep
}
c := make(chan bool)
var conn io.Writer
go func() {
conn, _ = net.Dial("tcp", endpoint)
c <- false
}()
go func() {
time.Sleep(2 * time.Second)
c <- true
}()
<-c
restartCount := 0
totalTestCount := 1
for {
daemon, err := runDaemon()
if err != nil {
return err
}
restartCount++
// time.Sleep(5000 * time.Millisecond)
var stop bool
go func() error {
stop = false
for i := 0; i < 100 && !stop; i++ {
for i := 0; i < 100 && !stop; {
func() error {
cmd := exec.Command(DOCKER_PATH, "run", "base", "echo", "hello", "world")
log.Printf("%d", i)
cmd := exec.Command(DOCKER_PATH, "run", "base", "echo", fmt.Sprintf("%d", totalTestCount))
i++
totalTestCount++
outPipe, err := cmd.StdoutPipe()
if err != nil {
return err
@@ -62,9 +91,10 @@ func crashTest() error {
if err := cmd.Start(); err != nil {
return err
}
go func() {
io.Copy(os.Stdout, outPipe)
}()
if conn != nil {
go io.Copy(conn, outPipe)
}
// Expecting error, do not check
inPipe.Write([]byte("hello world!!!!!\n"))
go inPipe.Write([]byte("hello world!!!!!\n"))


@@ -49,26 +49,39 @@ def docker(args, stdin=None):
def image_exists(img):
return docker(["inspect", img]).read().strip() != ""
def run_and_commit(img_in, cmd, stdin=None):
def image_config(img):
return json.loads(docker(["inspect", img]).read()).get("config", {})
def run_and_commit(img_in, cmd, stdin=None, author=None, run=None):
run_id = docker(["run"] + (["-i", "-a", "stdin"] if stdin else ["-d"]) + [img_in, "/bin/sh", "-c", cmd], stdin=stdin).read().rstrip()
print "---> Waiting for " + run_id
result=int(docker(["wait", run_id]).read().rstrip())
if result != 0:
print "!!! '{}' return non-zero exit code '{}'. Aborting.".format(cmd, result)
sys.exit(1)
return docker(["commit", run_id]).read().rstrip()
return docker(["commit"] + (["-author", author] if author else []) + (["-run", json.dumps(run)] if run is not None else []) + [run_id]).read().rstrip()
def insert(base, src, dst):
def insert(base, src, dst, author=None):
print "COPY {} to {} in {}".format(src, dst, base)
if dst == "":
raise Exception("Missing destination path")
stdin = file(src)
stdin.seek(0)
return run_and_commit(base, "cat > {0}; chmod +x {0}".format(dst), stdin=stdin)
return run_and_commit(base, "cat > {0}; chmod +x {0}".format(dst), stdin=stdin, author=author)
def add(base, src, dst, author=None):
print "PUSH to {} in {}".format(dst, base)
if src == ".":
tar = subprocess.Popen(["tar", "-c", "."], stdout=subprocess.PIPE).stdout
else:
tar = subprocess.Popen(["curl", src], stdout=subprocess.PIPE).stdout
if dst == "":
raise Exception("Missing argument to push")
return run_and_commit(base, "mkdir -p '{0}' && tar -C '{0}' -x".format(dst), stdin=tar, author=author)
def main():
base=""
maintainer=""
steps = []
try:
for line in sys.stdin.readlines():
@@ -77,22 +90,47 @@ def main():
if line == "" or line[0] == "#":
continue
op, param = line.split(" ", 1)
print op.upper() + " " + param
if op == "from":
print "FROM " + param
base = param
steps.append(base)
elif op == "maintainer":
maintainer = param
elif op == "run":
print "RUN " + param
result = run_and_commit(base, param)
result = run_and_commit(base, param, author=maintainer)
steps.append(result)
base = result
print "===> " + base
elif op == "copy":
src, dst = param.split(" ", 1)
result = insert(base, src, dst)
result = insert(base, src, dst, author=maintainer)
steps.append(result)
base = result
print "===> " + base
elif op == "add":
src, dst = param.split(" ", 1)
result = add(base, src, dst, author=maintainer)
steps.append(result)
base=result
print "===> " + base
elif op == "expose":
config = image_config(base)
if config.get("PortSpecs") is None:
config["PortSpecs"] = []
portspec = param.strip()
config["PortSpecs"].append(portspec)
result = run_and_commit(base, "# (nop) expose port {}".format(portspec), author=maintainer, run=config)
steps.append(result)
base=result
print "===> " + base
elif op == "cmd":
config = image_config(base)
cmd = list(json.loads(param))
config["Cmd"] = cmd
result = run_and_commit(base, "# (nop) set default command to '{}'".format(" ".join(cmd)), author=maintainer, run=config)
steps.append(result)
base=result
print "===> " + base
else:
print "Skipping uknown op " + op
except:


@@ -1,11 +1,13 @@
# Start build from a known base image
maintainer Solomon Hykes <solomon@dotcloud.com>
from base:ubuntu-12.10
# Update ubuntu sources
run echo 'deb http://archive.ubuntu.com/ubuntu quantal main universe multiverse' > /etc/apt/sources.list
run apt-get update
# Install system packages
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q git
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang
# Insert files from the host (./myscript must be present in the current directory)
copy myscript /usr/local/bin/myscript
copy myscript /usr/local/bin/myscript
push /src


@@ -0,0 +1,3 @@
#!/bin/sh
echo hello, world!

contrib/mkimage-debian.sh (new executable file, +61 lines)

@@ -0,0 +1,61 @@
#!/bin/bash
set -e
# these should match the names found at http://www.debian.org/releases/
stableSuite='squeeze'
testingSuite='wheezy'
unstableSuite='sid'
# if suite is equal to this, it gets the "latest" tag
latestSuite="$testingSuite"
variant='minbase'
include='iproute,iputils-ping'
repo="$1"
suite="${2:-$latestSuite}"
mirror="${3:-}" # stick to the default debootstrap mirror if one is not provided
if [ ! "$repo" ]; then
echo >&2 "usage: $0 repo [suite [mirror]]"
echo >&2 " ie: $0 tianon/debian squeeze"
exit 1
fi
target="/tmp/docker-rootfs-debian-$suite-$$-$RANDOM"
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
returnTo="$(pwd -P)"
set -x
# bootstrap
mkdir -p "$target"
sudo debootstrap --verbose --variant="$variant" --include="$include" "$suite" "$target" "$mirror"
cd "$target"
# create the image
img=$(sudo tar -c . | docker import -)
# tag suite
docker tag $img $repo $suite
if [ "$suite" = "$latestSuite" ]; then
# tag latest
docker tag $img $repo latest
fi
# test the image
docker run -i -t $repo:$suite echo success
# unstable's version numbers match testing (since it's mostly just a sandbox for testing), so it doesn't get a version number tag
if [ "$suite" != "$unstableSuite" -a "$suite" != 'unstable' ]; then
# tag the specific version
ver=$(docker run $repo:$suite cat /etc/debian_version)
docker tag $img $repo $ver
fi
# cleanup
cd "$returnTo"
sudo rm -rf "$target"
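A usage sketch, following the script's own usage message (the repository and suite are the values from its example; the script invokes sudo itself for debootstrap and cleanup):
# build a Debian squeeze image and tag it into the tianon/debian repository
./contrib/mkimage-debian.sh tianon/debian squeeze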


@@ -7,15 +7,16 @@ import (
"github.com/dotcloud/docker/rcli"
"github.com/dotcloud/docker/term"
"io"
"io/ioutil"
"log"
"os"
"os/signal"
"strconv"
"syscall"
)
var (
-GIT_COMMIT string
-NO_MEMORY_LIMIT string
+GIT_COMMIT string
)
func main() {
@@ -27,6 +28,7 @@ func main() {
// FIXME: Switch d and D ? (to be more sshd like)
flDaemon := flag.Bool("d", false, "Daemon mode")
flDebug := flag.Bool("D", false, "Debug mode")
flAutoRestart := flag.Bool("r", false, "Restart previously running containers")
bridgeName := flag.String("b", "", "Attach containers to a pre-existing network bridge")
pidfile := flag.String("p", "/var/run/docker.pid", "File containing process PID")
flag.Parse()
@@ -39,16 +41,12 @@ func main() {
os.Setenv("DEBUG", "1")
}
docker.GIT_COMMIT = GIT_COMMIT
-docker.NO_MEMORY_LIMIT = NO_MEMORY_LIMIT == "1"
if *flDaemon {
if flag.NArg() != 0 {
flag.Usage()
return
}
-if NO_MEMORY_LIMIT == "1" {
-log.Printf("WARNING: This version of docker has been compiled without memory limit support.")
-}
-if err := daemon(*pidfile); err != nil {
+if err := daemon(*pidfile, *flAutoRestart); err != nil {
log.Fatal(err)
}
} else {
@@ -59,8 +57,13 @@ func main() {
}
func createPidFile(pidfile string) error {
-if _, err := os.Stat(pidfile); err == nil {
-return fmt.Errorf("pid file found, ensure docker is not running or delete %s", pidfile)
+if pidString, err := ioutil.ReadFile(pidfile); err == nil {
+pid, err := strconv.Atoi(string(pidString))
+if err == nil {
+if _, err := os.Stat(fmt.Sprintf("/proc/%d/", pid)); err == nil {
+return fmt.Errorf("pid file found, ensure docker is not running or delete %s", pidfile)
}
}
}
file, err := os.Create(pidfile)
@@ -80,7 +83,7 @@ func removePidFile(pidfile string) {
}
}
-func daemon(pidfile string) error {
+func daemon(pidfile string, autoRestart bool) error {
if err := createPidFile(pidfile); err != nil {
log.Fatal(err)
}
@@ -95,7 +98,7 @@ func daemon(pidfile string) error {
os.Exit(0)
}()
-service, err := docker.NewServer()
+service, err := docker.NewServer(autoRestart)
if err != nil {
return err
}
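For illustration, a minimal sketch of the new flag in use, combining it with the daemon invocation shown elsewhere in these docs:
# start the daemon and restart the containers that were previously running
sudo docker -d -r &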


@@ -51,6 +51,7 @@ docs:
cp sources/dotcloud.yml $(BUILDDIR)/html/
cp sources/CNAME $(BUILDDIR)/html/
cp sources/.nojekyll $(BUILDDIR)/html/
cp sources/nginx.conf $(BUILDDIR)/html/
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."


@@ -9,3 +9,19 @@
Create a new image from a container's changes
-m="": Commit message
-author="": Author (eg. "John Hannibal Smith <hannibal@a-team.com>"
-run="": Config automatically applied when the image is run. "+`(ex: {"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')
Full -run example::
{"Hostname": "",
"User": "",
"Memory": 0,
"MemorySwap": 0,
"PortSpecs": ["22", "80", "443"],
"Tty": true,
"OpenStdin": true,
"StdinOnce": true,
"Env": ["FOO=BAR", "FOO2=BAR2"],
"Cmd": ["cat", "-e", "/etc/resolv.conf"],
"Dns": ["8.8.8.8", "8.8.4.4"]}


@@ -17,3 +17,5 @@
-p=[]: Map a network port to the container
-t=false: Allocate a pseudo-tty
-u="": Username or UID
-d=[]: Set custom dns servers for the container
-v=[]: Create a new volume and mount it at the specified path. A container ID can be passed instead of a path in order to mount all volumes from the given container.
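A short sketch of the two forms this option accepts (the image name and container id are placeholders)::
# create a new volume mounted at /data inside the container
docker run -v /data base /bin/bash
# mount all volumes from that container in a second one by passing its id instead of a path
docker run -v $CONTAINER_ID base /bin/bash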


@@ -10,9 +10,9 @@ Building blocks
Images
------
-An original container image. These are stored on disk and are comparable with what you normally expect from a stoppped virtual machine image. Images are stored (and retrieved from) repository
+An original container image. These are stored on disk and are comparable with what you normally expect from a stopped virtual machine image. Images are stored (and retrieved from) repository
-Images are stored on your local file system under /var/lib/docker/images
+Images are stored on your local file system under /var/lib/docker/graph
.. _containers:


@@ -0,0 +1,53 @@
:title: Sharing data between 2 couchdb databases
:description: Sharing data between 2 couchdb databases
:keywords: docker, example, package installation, networking, couchdb, data volumes
.. _couchdb_data_volumes:
Sharing data between 2 couchdb databases
========================================
.. include:: example_header.inc
Here's an example of using data volumes to share the same data between 2 couchdb containers.
This could be used for hot upgrades, testing different versions of couchdb on the same data, etc.
Create first database
---------------------
Note that we're marking /var/lib/couchdb as a data volume.
.. code-block:: bash
COUCH1=$(docker run -d -v /var/lib/couchdb shykes/couchdb:2013-05-03)
Add data to the first database
------------------------------
We're assuming your docker host is reachable at `localhost`. If not, replace `localhost` with the public IP of your docker host.
.. code-block:: bash
HOST=localhost
URL="http://$HOST:$(docker port $COUCH1 5984)/_utils/"
echo "Navigate to $URL in your browser, and use the couch interface to add data"
Create second database
----------------------
This time, we're requesting shared access to $COUCH1's volumes.
.. code-block:: bash
COUCH2=$(docker run -d -volumes-from $COUCH1 shykes/couchdb:2013-05-03)
Browse data on the second database
----------------------------------
.. code-block:: bash
HOST=localhost
URL="http://$HOST:$(docker port $COUCH2 5984)/_utils/"
echo "Navigate to $URL in your browser. You should see the same data as in the first database!"
Congratulations, you are running 2 Couchdb containers, completely isolated from each other *except* for their data.


@@ -18,3 +18,4 @@ Contents:
python_web_app
running_redis_service
running_ssh_service
couchdb_data_volumes


@@ -49,7 +49,7 @@ Save the changed we just made in the container to a new image called "_/builds/g
WEB_WORKER=$(docker run -d -p 5000 $BUILD_IMG /usr/local/bin/runapp)
- **"docker run -d "** run a command in a new container. We pass "-d" so it runs as a daemon.
**"-p 5000"* the web app is going to listen on this port, so it must be mapped from the container to the host system.
- **"-p 5000"** the web app is going to listen on this port, so it must be mapped from the container to the host system.
- **"$BUILD_IMG"** is the image we want to run the command inside of.
- **/usr/local/bin/runapp** is the command which starts the web app.


@@ -71,34 +71,40 @@
<h2>
<a name="installing-on-ubuntu-1204-and-1210" class="anchor" href="#installing-on-ubuntu-1204-and-1210"><span class="mini-icon mini-icon-link"></span>
</a>Installing on Ubuntu</h2>
<p><strong>Requirements</strong></p>
<ul>
<li>Ubuntu 12.04 (LTS) (64-bit)</li>
<li> or Ubuntu 12.10 (quantal) (64-bit)</li>
</ul>
<ol>
<li>
<p>Install dependencies:</p>
<p><strong>Install dependencies</strong></p>
The linux-image-extra package is only needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
<pre>sudo apt-get install linux-image-extra-`uname -r`</pre>
<div class="highlight">
<pre>sudo apt-get install lxc wget bsdtar curl</pre>
<pre>sudo apt-get install linux-image-extra-<span class="sb">`</span>uname -r<span class="sb">`</span></pre></div>
<p>The <code>linux-image-extra</code> package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.</p>
</li>
<li>
<p>Install the latest docker binary:</p>
<p><strong>Install Docker</strong></p>
<p>Add the Ubuntu PPA (Personal Package Archive) sources to your apt sources list, update and install.</p>
<p>You may see some warnings that the GPG keys cannot be verified.</p>
<div class="highlight">
<pre>sudo sh -c "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >> /etc/apt/sources.list"</pre>
<pre>sudo apt-get update</pre>
<pre>sudo apt-get install lxc-docker</pre>
</div>
</li>
<li>
<p><strong>Run!</strong></p>
<div class="highlight">
<pre>wget http://get.docker.io/builds/<span class="k">$(</span>uname -s<span class="k">)</span>/<span class="k">$(</span>uname -m<span class="k">)</span>/docker-master.tgz</pre>
<pre>tar -xf docker-master.tgz</pre>
<pre>docker run -i -t ubuntu /bin/bash</pre>
</div>
</li>
<li>
<p>Run your first container!</p>
<div class="highlight"><pre><span class="nb">cd </span>docker-master</pre>
<pre>sudo ./docker run -i -t base /bin/bash</pre>
</div>
<p>Done!</p>
<p>Consider adding docker to your <code>PATH</code> for simplicity.</p>
</li>
Continue with the <a href="http://docs.docker.io/en/latest/examples/hello_world/">Hello world</a> example.
</ol>
</section>
@@ -117,7 +123,7 @@
vagrant and an Ubuntu virtual machine.</strong></p>
<ul>
<li><a href="http://docs.docker.io/en/latest/installation/macos/">Mac OS X and other linuxes</a></li>
<li><a href="http://docs.docker.io/en/latest/installation/vagrant/">Mac OS X and other linuxes</a></li>
<li><a href="http://docs.docker.io/en/latest/installation/windows/">Windows</a></li>
</ul>


@@ -15,6 +15,7 @@ This documentation has the following resources:
examples/index
contributing/index
commandline/index
registry/index
faq


@@ -1,8 +1,9 @@
Amazon EC2
==========
-Please note this is a community contributed installation path. The only 'official' installation is using the :ref:`ubuntu_linux` installation path. This version
-may be out of date because it depends on some binaries to be updated and published
+Please note this is a community contributed installation path. The only 'official' installation is using the
+:ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
Installation
------------
@@ -17,7 +18,7 @@ Docker can now be installed on Amazon EC2 with a single vagrant command. Vagrant
vagrant plugin install vagrant-aws
-3. Get the docker sources, this will give you the latest Vagrantfile and puppet manifests.
+3. Get the docker sources, this will give you the latest Vagrantfile.
::


@@ -3,19 +3,23 @@
Arch Linux
==========
Please note this is a community contributed installation path. The only 'official' installation is using the
:ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
Installing on Arch Linux is not officially supported but can be handled via
either of the following AUR packages:
-* `dotcloud-docker <https://aur.archlinux.org/packages/dotcloud-docker/>`_
-* `dotcloud-docker-git <https://aur.archlinux.org/packages/dotcloud-docker-git/>`_
+* `lxc-docker <https://aur.archlinux.org/packages/lxc-docker/>`_
+* `lxc-docker-git <https://aur.archlinux.org/packages/lxc-docker-git/>`_
-The dotcloud-docker package will install the latest tagged version of docker.
-The dotcloud-docker-git package will build from the current master branch.
+The lxc-docker package will install the latest tagged version of docker.
+The lxc-docker-git package will build from the current master branch.
Dependencies
------------
-Docker depends on several packages which will be installed automatically with
+Docker depends on several packages which are specified as dependencies in
either AUR package.
* aufs3
@@ -23,6 +27,7 @@ either AUR package.
* go
* iproute2
* linux-aufs_friendly
* lxc
Installation
------------
@@ -37,7 +42,24 @@ new kernel will be compiled and this can take quite a while.
::
-yaourt -S dotcloud-docker-git
+yaourt -S lxc-docker-git
Starting Docker
---------------
Prior to starting docker modify your bootloader to use the
**linux-aufs_friendly** kernel and reboot your system.
There is a systemd service unit created for docker. To start the docker service:
::
sudo systemctl start docker
To start on system boot:
::
sudo systemctl enable docker


@@ -0,0 +1,53 @@
.. _binaries:
Binaries
========
**Please note this project is currently under heavy development. It should not be used in production.**
Right now, the officially supported distributions are:
- Ubuntu 12.04 (precise LTS) (64-bit)
- Ubuntu 12.10 (quantal) (64-bit)
Install dependencies:
---------------------
::
sudo apt-get install lxc bsdtar
sudo apt-get install linux-image-extra-`uname -r`
The linux-image-extra package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
Install the docker binary:
::
wget http://get.docker.io/builds/Linux/x86_64/docker-master.tgz
tar -xf docker-master.tgz
sudo cp ./docker-master /usr/local/bin
Note: docker currently only supports 64-bit Linux hosts.
Run the docker daemon
---------------------
::
sudo docker -d &
Run your first container!
-------------------------
::
docker run -i -t ubuntu /bin/bash
Continue with the :ref:`hello_world` example.


@@ -13,8 +13,9 @@ Contents:
:maxdepth: 1
ubuntulinux
binaries
archlinux
macos
vagrant
windows
amazon
upgrading


@@ -1,66 +0,0 @@
Mac OS X and other linux
========================
Please note this is a community contributed installation path. The only 'official' installation is using the :ref:`ubuntu_linux` installation path. This version
may be out of date because it depends on some binaries to be updated and published
Requirements
------------
We currently rely on some Ubuntu-linux specific packages, this will change in the future, but for now we provide a
streamlined path to install Virtualbox with a Ubuntu 12.10 image using Vagrant.
1. Install virtualbox from https://www.virtualbox.org/ (or use your package manager)
2. Install vagrant from http://www.vagrantup.com/ (or use your package manager)
3. Install git if you had not installed it before, check if it is installed by running
``git`` in a terminal window
We recommend having at least about 2Gb of free disk space and 2Gb RAM (or more).
Installation
------------
1. Fetch the docker sources
.. code-block:: bash
git clone https://github.com/dotcloud/docker.git
2. Run vagrant from the sources directory
.. code-block:: bash
vagrant up
Vagrant will:
* Download the Quantal64 base ubuntu virtual machine image from get.docker.io/
* Boot this image in virtualbox
Then it will use Puppet to perform an initial setup in this machine:
* Download & untar the most recent docker binary tarball to vagrant homedir.
* Debootstrap to /var/lib/docker/images/ubuntu.
* Install & run dockerd as service.
* Put docker in /usr/local/bin.
* Put latest Go toolchain in /usr/local/go.
You now have a Ubuntu Virtual Machine running with docker pre-installed.
To access the VM and use Docker, Run ``vagrant ssh`` from the same directory as where you ran
``vagrant up``. Vagrant will make sure to connect you to the correct VM.
.. code-block:: bash
vagrant ssh
Now you are in the VM, run docker
.. code-block:: bash
docker
Continue with the :ref:`hello_world` example.


@@ -6,51 +6,56 @@ Ubuntu Linux
**Please note this project is currently under heavy development. It should not be used in production.**
-Installing on Ubuntu 12.04 and 12.10
+Right now, the officially supported distributions are:
-* Ubuntu 12.04 (precise LTS)
-* Ubuntu 12.10 (quantal)
+- Ubuntu 12.04 (precise LTS) (64-bit)
+- Ubuntu 12.10 (quantal) (64-bit)
-Install dependencies:
---------------------
+Dependencies
------------
-::
+The linux-image-extra package is only needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
-sudo apt-get install lxc bsdtar
-sudo apt-get install linux-image-extra-`uname -r`
+.. code-block:: bash
-The linux-image-extra package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
-Install the docker binary
-------------------------
-::
-wget http://get.docker.io/builds/Linux/x86_64/docker-master.tgz
-tar -xf docker-master.tgz
-sudo cp ./docker-master /usr/local/bin
-Note: docker currently only supports 64-bit Linux hosts.
+sudo apt-get install linux-image-extra-`uname -r`
-Run the docker daemon
---------------------
+Installation
------------
-::
-sudo docker -d &
-Run your first container!
-------------------------
-::
-docker run -i -t ubuntu /bin/bash
+Docker is available as a Ubuntu PPA (Personal Package Archive),
+`hosted on launchpad <https://launchpad.net/~dotcloud/+archive/lxc-docker>`_
+which makes installing Docker on Ubuntu very easy.
-Check out more examples
-----------------------
-Continue with the :ref:`hello_world` example.
+Add the custom package sources to your apt sources list. Copy and paste the following lines at once.
+.. code-block:: bash
+sudo sh -c "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >> /etc/apt/sources.list"
+Update your sources. You will see a warning that GPG signatures cannot be verified.
+.. code-block:: bash
+sudo apt-get update
+Now install it, you will see another warning that the package cannot be authenticated. Confirm install.
+.. code-block:: bash
+sudo apt-get install lxc-docker
+Verify it worked
+.. code-block:: bash
+docker
+**Done!**, now continue with the :ref:`hello_world` example.


@@ -3,7 +3,8 @@
Upgrading
============
-We assume you are upgrading from within the operating system which runs your docker daemon.
+These instructions are for upgrading your Docker binary when you used a custom (non package manager) installation.
+If you installed docker using apt-get, use that to upgrade.
Get the latest docker binary:


@@ -0,0 +1,70 @@
.. _install_using_vagrant:
Using Vagrant
=============
Please note this is a community contributed installation path. The only 'official' installation is using the
:ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
**Requirements:**
This guide will set up a new virtual machine with docker installed on your computer. This works on most operating
systems, including Mac OS X, Windows, Linux, FreeBSD and others. If you can install these and have at least 400Mb RAM
to spare you should be good.
Install Vagrant and Virtualbox
------------------------------
1. Install virtualbox from https://www.virtualbox.org/ (or use your package manager)
2. Install vagrant from http://www.vagrantup.com/ (or use your package manager)
3. Install git if you haven't installed it before; check whether it is installed by running
``git`` in a terminal window
Spin it up
----------
1. Fetch the docker sources (this includes the Vagrantfile for machine setup).
.. code-block:: bash
git clone https://github.com/dotcloud/docker.git
2. Run vagrant from the sources directory
.. code-block:: bash
vagrant up
Vagrant will:
* Download the 'official' Precise64 base ubuntu virtual machine image from vagrantup.com
* Boot this image in virtualbox
* Add the `Docker PPA sources <https://launchpad.net/~dotcloud/+archive/lxc-docker>`_ to /etc/apt/sources.list
* Update your sources
* Install lxc-docker
You now have a Ubuntu Virtual Machine running with docker pre-installed.
Connect
-------
To access the VM and use Docker, run ``vagrant ssh`` from the same directory as where you ran
``vagrant up``. Vagrant will connect you to the correct VM.
.. code-block:: bash
vagrant ssh
Run
-----
Now you are in the VM, run docker
.. code-block:: bash
docker
Continue with the :ref:`hello_world` example.


@@ -3,8 +3,8 @@
:keywords: Docker, Docker documentation, Windows, requirements, virtualbox, vagrant, git, ssh, putty, cygwin
-Windows
-=========
+Windows (with Vagrant)
+======================
Please note this is a community contributed installation path. The only 'official' installation is using the :ref:`ubuntu_linux` installation path. This version
may be out of date because it depends on some binaries to be updated and published

docs/sources/nginx.conf (new file, 6 lines)

@@ -0,0 +1,6 @@
# rule to redirect original links created when hosted on github pages
rewrite ^/documentation/(.*).html http://docs.docker.io/en/latest/$1/ permanent;
# rewrite the stuff which was on the current page
rewrite ^/gettingstarted.html$ /gettingstarted/ permanent;
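A quick sanity check once the config is live (a sketch; assumes docs.docker.io serves this file and that curl is available):
# the first rule should answer with a permanent redirect to docs.docker.io/en/latest/...
curl -sI http://docs.docker.io/documentation/commandline.html | grep -i '^location'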


@@ -0,0 +1,464 @@
===================
Docker Registry API
===================
.. contents:: Table of Contents
1. The 3 roles
===============
1.1 Index
---------
The Index is responsible for centralizing information about:
- User accounts
- Checksums of the images
- Public namespaces
The Index has different components:
- Web UI
- Meta-data store (comments, stars, list public repositories)
- Authentication service
- Tokenization
The Index is authoritative for this information.
We expect that there will be only one instance of the index, run and managed by dotCloud.
1.2 Registry
------------
- It stores the images and the graph for a set of repositories
- It does not have user accounts data
- It has no notion of user accounts or authorization
- It delegates authentication and authorization to the Index Auth service using tokens
- It supports different storage backends (S3, cloud files, local FS)
- It doesn't have a local database
- It will be open-sourced at some point
We expect that there will be multiple registries out there. To help grasp the context, here are some examples of registries:
- **sponsor registry**: such a registry is provided by a third-party hosting infrastructure as a convenience for their customers and the docker community as a whole. Its costs are supported by the third party, but the management and operation of the registry are supported by dotCloud. It features read/write access, and delegates authentication and authorization to the Index.
- **mirror registry**: such a registry is provided by a third-party hosting infrastructure but is targeted at their customers only. Some mechanism (unspecified to date) ensures that public images are pulled from a sponsor registry to the mirror registry, to make sure that the customers of the third-party provider can “docker pull” those images locally.
- **vendor registry**: such a registry is provided by a software vendor, who wants to distribute docker images. It would be operated and managed by the vendor. Only users authorized by the vendor would be able to get write access. Some images would be public (accessible for anyone), others private (accessible only for authorized users). Authentication and authorization would be delegated to the Index. The goal of vendor registries is to let someone do “docker pull basho/riak1.3” and automatically push from the vendor registry (instead of a sponsor registry); i.e. get all the convenience of a sponsor registry, while retaining control on the asset distribution.
- **private registry**: such a registry is located behind a firewall, or protected by an additional security layer (HTTP authorization, SSL client-side certificates, IP address authorization...). The registry is operated by a private entity, outside of dotCloud's control. It can optionally delegate additional authorization to the Index, but it is not mandatory.
.. note::
Mirror registries and private registries which do not use the Index don't even need to run the registry code. They can be implemented by any kind of transport implementing HTTP GET and PUT. Read-only registries can be powered by a simple static HTTP server.
.. note::
The latter implies that while HTTP is the protocol of choice for a registry, multiple schemes are possible (and in some cases, trivial):
- HTTP with GET (and PUT for read-write registries);
- local mount point;
- remote docker addressed through SSH.
The latter would only require two new commands in docker, e.g. “registryget” and “registryput”, wrapping access to the local filesystem (and optionally doing consistency checks). Authentication and authorization are then delegated to SSH (e.g. with public keys).
1.3 Docker
----------
On top of being a runtime for LXC, Docker is the Registry client. It supports:
- Push / Pull on the registry
- Client authentication on the Index
2. Workflow
===========
2.1 Pull
--------
.. image:: /static_files/docker_pull_chart.png
1. Contact the Index to know where I should download “samalba/busybox”
2. Index replies:
a. “samalba/busybox” is on Registry A
b. here are the checksums for “samalba/busybox” (for all layers)
c. token
3. Contact Registry A to receive the layers for “samalba/busybox” (all of them, down to the base image). Registry A is authoritative for “samalba/busybox” but keeps a copy of all inherited layers and serves them all from the same location.
4. The registry contacts the index to verify if the token/user is allowed to download images
5. The Index returns true/false, letting the registry know if it should proceed or error out
6. Get the payload for all layers
It's possible to run docker pull https://<registry>/repositories/samalba/busybox. In this case, docker bypasses the Index. However the security is not guaranteed (in case Registry A is corrupted) because there won't be any checksum checks.
Currently the registry redirects to S3 URLs for downloads; going forward, all downloads need to be streamed through the registry. The Registry will then abstract the calls to S3 by a top-level class which implements sub-classes for S3 and local storage.
A token is only returned when it is a private repo; public repos do not require tokens to be returned. The Registry will still contact the Index to make sure the pull is authorized (“is it ok to download this repo without a Token?”).
API (pulling repository foo/bar):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1. (Docker -> Index) GET /v1/repositories/foo/bar/images
**Headers**:
Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
X-Docker-Token: true
**Action**:
(looks up foo/bar in the db and gets images and checksums for that repo; all if no tag is specified, otherwise only the checksums for those tags. See part 4.4.1)
2. (Index -> Docker) HTTP 200 OK
**Headers**:
- Authorization: Token signature=123abc,repository="foo/bar",access=write
- X-Docker-Endpoints: registry.docker.io [, registry2.docker.io]
**Body**:
Jsonified checksums (see part 4.4.1)
3. (Docker -> Registry) GET /v1/repositories/foo/bar/tags/latest
**Headers**:
Authorization: Token signature=123abc,repository="foo/bar",access=write
4. (Registry -> Index) GET /v1/repositories/foo/bar/images
**Headers**:
Authorization: Token signature=123abc,repository="foo/bar",access=read
**Body**:
<ids and checksums in payload>
**Action**:
(Look up the token and see if they have access to pull.)
If good:
HTTP 200 OK
Index will invalidate the token
If bad:
HTTP 401 Unauthorized
5. (Docker -> Registry) GET /v1/images/928374982374/ancestry
**Action**:
(for each image id returned in the registry, fetch /json + /layer)
.. note::
If someone makes a second request, then we will always give a new token, never reuse tokens.
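A rough curl sketch of steps 1 and 3 of this flow; the credentials are placeholders and the index hostname is an assumption (only registry.docker.io is named in this document)::
# step 1: ask the Index for the image list, requesting a token
curl -u someuser:somepassword -H "X-Docker-Token: true" https://index.docker.io/v1/repositories/samalba/busybox/images
# step 3: resolve a tag on the Registry, replaying the returned token
curl -H 'Authorization: Token signature=123abc,repository="samalba/busybox",access=read' https://registry.docker.io/v1/repositories/samalba/busybox/tags/latest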
2.2 Push
--------
.. image:: /static_files/docker_push_chart.png
1. Contact the index to allocate the repository name “samalba/busybox” (authentication required with user credentials)
2. If authentication works and namespace available, “samalba/busybox” is allocated and a temporary token is returned (namespace is marked as initialized in index)
3. Push the image on the registry (along with the token)
4. Registry A contacts the Index to verify the token (the token must correspond to the repository name)
5. The Index validates the token. Registry A starts reading the stream pushed by docker and stores the repository (with its images)
6. docker contacts the index to give checksums for the uploaded images
.. note::
**It's possible not to use the Index at all!** In this case, a standalone Registry is deployed to store and serve images. Those images are not authenticated and the security is not guaranteed.
.. note::
**Index can be replaced!** For a privately deployed Registry, a custom Index can be used to serve and validate tokens according to different policies.
Docker computes the checksums and submits them to the Index at the end of the push. When a repository name does not have checksums on the Index, it means that the push is in progress (since checksums are submitted at the end).
API (pushing repos foo/bar):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1. (Docker -> Index) PUT /v1/repositories/foo/bar/
**Headers**:
Authorization: Basic sdkjfskdjfhsdkjfh==
X-Docker-Token: true
**Action**::
- in the index, we allocate a new repository, and set it to initialized
**Body**::
(The body contains the list of images that are going to be pushed, with empty checksums. The checksums will be set at the end of the push)::
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"}]
2. (Index -> Docker) 200 Created
**Headers**:
- WWW-Authenticate: Token signature=123abc,repository="foo/bar",access=write
- X-Docker-Endpoints: registry.docker.io [, registry2.docker.io]
3. (Docker -> Registry) PUT /v1/images/98765432_parent/json
**Headers**:
Authorization: Token signature=123abc,repository="foo/bar",access=write
4. (Registry->Index) GET /v1/repositories/foo/bar/images
**Headers**:
Authorization: Token signature=123abc,repository="foo/bar",access=write
**Action**::
- Index:
will invalidate the token.
- Registry:
grants a session (if the token is approved) and fetches the image ids
5. (Docker -> Registry) PUT /v1/images/98765432_parent/json
**Headers**::
- Authorization: Token signature=123abc,repository="foo/bar",access=write
- Cookie: (Cookie provided by the Registry)
6. (Docker -> Registry) PUT /v1/images/98765432/json
**Headers**:
Cookie: (Cookie provided by the Registry)
7. (Docker -> Registry) PUT /v1/images/98765432_parent/layer
**Headers**:
Cookie: (Cookie provided by the Registry)
8. (Docker -> Registry) PUT /v1/images/98765432/layer
**Headers**:
X-Docker-Checksum: sha256:436745873465fdjkhdfjkgh
9. (Docker -> Registry) PUT /v1/repositories/foo/bar/tags/latest
**Headers**:
Cookie: (Cookie provided by the Registry)
**Body**:
"98765432"
10. (Docker -> Index) PUT /v1/repositories/foo/bar/images
**Headers**:
Authorization: Basic 123oislifjsldfj==
X-Docker-Endpoints: registry1.docker.io (no validation on this right now)
**Body**:
(The image, ids, tags and checksums)
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
"checksum": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"}]
**Return** HTTP 204
.. note::
If a push fails and needs to be started again, there will already be a record for the namespace/name in the index, but it will be marked initialized. Should we allow this, or mark the name as already used? One edge case could be if someone pushes the same thing at the same time with two different shells.
If it's a retry on the Registry, Docker has a cookie (provided by the registry after token validation), so the Index won't have to provide a new token.
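A curl sketch of step 1 of this push flow, mirroring the headers and body listed above (credentials and the index hostname are placeholders)::
curl -X PUT -u someuser:somepassword -H "X-Docker-Token: true" -d '[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"}]' https://index.docker.io/v1/repositories/foo/bar/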
3. How to use the Registry in standalone mode
=============================================
The Index has two main purposes (along with its fancy social features):
- Resolve short names (to avoid passing absolute URLs all the time)
- username/projectname -> https://registry.docker.io/users/<username>/repositories/<projectname>/
- team/projectname -> https://registry.docker.io/team/<team>/repositories/<projectname>/
- Authenticate a user as a repos owner (for a central referenced repository)
3.1 Without an Index
--------------------
Using the Registry without the Index can be useful to store the images on a private network without having to rely on an external entity controlled by dotCloud.
In this case, the registry will be launched in a special mode (--standalone? --no-index?). In this mode, the only thing which changes is that the Registry will never contact the Index to verify a token. It will be the Registry owner's responsibility to authenticate the user who pushes (or even pulls) an image using any mechanism (HTTP auth, IP based, etc...).
In this scenario, the Registry is responsible for the security in case of data corruption since the checksums are not delivered by a trusted entity.
As hinted previously, a standalone registry can also be implemented by any HTTP server handling GET/PUT requests (or even only GET requests if no write access is necessary).
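To make that last point concrete: a read-only standalone registry can be nothing more than a static file server over a directory of pre-generated /v1/... GET responses. A sketch, where the path and port are assumptions::
cd /var/lib/docker-registry && python -m SimpleHTTPServer 5000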
3.2 With an Index
-----------------
The Index data needed by the Registry are simple:
- Serve the checksums
- Provide and authorize a Token
In the scenario of a Registry running on a private network with the need of centralizing and authorizing, it's easy to use a custom Index.
The only challenge will be to tell Docker to contact (and trust) this custom Index. Docker will be configurable at some point to use a specific Index; it'll be the private entity's responsibility (basically the organization who uses Docker in a private environment) to maintain the Index and the Docker configuration among its consumers.
4. The API
==========
The first version of the api is available here: https://github.com/jpetazzo/docker/blob/acd51ecea8f5d3c02b00a08176171c59442df8b3/docs/images-repositories-push-pull.md
4.1 Images
----------
The format returned in the images is not defined here (for layer and json), basically because the Registry stores exactly the same kind of information as Docker uses to manage them.
The format of ancestry is a line-separated list of image ids, in age order, i.e. the image's parent is on the last line, the parent of the parent on the next-to-last line, etc.; if the image has no parent, the file is empty.
GET /v1/images/<image_id>/layer
PUT /v1/images/<image_id>/layer
GET /v1/images/<image_id>/json
PUT /v1/images/<image_id>/json
GET /v1/images/<image_id>/ancestry
PUT /v1/images/<image_id>/ancestry
4.2 Users
---------
4.2.1 Create a user (Index)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
POST /v1/users
**Body**:
{"email": "sam@dotcloud.com", "password": "toto42", "username": "foobar"'}
**Validation**:
- **username**: min 4 characters, max 30 characters, all lowercase, no special characters.
- **password**: min 5 characters
**Valid**: return HTTP 200
Errors: HTTP 400 (we should create error codes for possible errors)
- invalid json
- missing field
- wrong format (username, password, email, etc)
- forbidden name
- name already exists
.. note::
A user account will be valid only if the email has been validated (a validation link is sent to the email address).
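A curl sketch of this call using the example body above (the index hostname is an assumption)::
curl -X POST -d '{"email": "sam@dotcloud.com", "password": "toto42", "username": "foobar"}' https://index.docker.io/v1/users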
4.2.2 Update a user (Index)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
PUT /v1/users/<username>
**Body**:
{"password": "toto"}
.. note::
The email address can also be updated; if it is, the user will need to re-verify the new email address.
4.2.3 Login (Index)
^^^^^^^^^^^^^^^^^^^
Does nothing but ask for user authentication. Can be used to validate credentials. HTTP Basic Auth for now; this may change in the future.
GET /v1/users
**Return**:
- Valid: HTTP 200
- Invalid login: HTTP 401
- Account inactive: HTTP 403 Account is not Active
4.3 Tags (Registry)
-------------------
The Registry does not know anything about users. Even though repositories are under usernames, it's just a namespace for the registry. This allows us to implement organizations or different namespaces per user later, without modifying the Registry's API.
4.3.1 Get all tags
^^^^^^^^^^^^^^^^^^
GET /v1/repositories/<namespace>/<repository_name>/tags
**Return**: HTTP 200
{
"latest": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
"0.1.1": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"
}
4.3.2 Read the content of a tag (resolve the image id)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
GET /v1/repositories/<namespace>/<repo_name>/tags/<tag>
**Return**:
"9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"
4.3.3 Delete a tag (registry)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
DELETE /v1/repositories/<namespace>/<repo_name>/tags/<tag>
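A curl sketch of the three tag operations just listed; the hostname and repository are placeholders, and the Token/Cookie headers from section 6 are omitted for brevity::
curl https://registry.docker.io/v1/repositories/foo/bar/tags
curl https://registry.docker.io/v1/repositories/foo/bar/tags/latest
curl -X DELETE https://registry.docker.io/v1/repositories/foo/bar/tags/latest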
4.4 Images (Index)
------------------
For the Index to “resolve” the repository name to a Registry location, it uses the X-Docker-Endpoints header. In other terms, these requests always add an “X-Docker-Endpoints” header to indicate the location of the registry which hosts this repository.
4.4.1 Get the images
^^^^^^^^^^^^^^^^^^^^^
GET /v1/repositories/<namespace>/<repo_name>/images
**Return**: HTTP 200
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f", "checksum": "md5:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"}]
4.4.2 Add/update the images
^^^^^^^^^^^^^^^^^^^^^^^^^^^
You always add images, you never remove them.
PUT /v1/repositories/<namespace>/<repo_name>/images
**Body**:
[ {"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f", "checksum": "sha256:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"} ]
**Return** 204
5. Chaining Registries
======================
It's possible to chain Registry servers for several reasons:
- Load balancing
- Delegate the next request to another server
When a Registry is a reference for a repository, it should host the entire image chain in order to avoid breaking the chain during the download.
The Index and Registry use this mechanism to redirect on one or the other.
Example with an image download:
On every request, a special header can be returned:
X-Docker-Endpoints: server1,server2
On the next request, the client will always pick a server from this list.
6. Authentication & Authorization
=================================
6.1 On the Index
-----------------
The Index supports both “Basic” and “Token” challenges. Usually when there is a “401 Unauthorized”, the Index replies this::
401 Unauthorized
WWW-Authenticate: Basic realm="auth required",Token
You have 3 options:
1. Provide user credentials and ask for a token
**Header**:
- Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
- X-Docker-Token: true
In this case, along with the 200 response, you'll get a new token (if user auth is ok):
**Response**:
- 200 OK
- X-Docker-Token: Token signature=123abc,repository="foo/bar",access=read
2. Provide user credentials only
**Header**:
Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
3. Provide Token
**Header**:
Authorization: Token signature=123abc,repository="foo/bar",access=read
6.2 On the Registry
-------------------
The Registry only supports the Token challenge::
401 Unauthorized
WWW-Authenticate: Token
The only way is to provide a token on “401 Unauthorized” responses::
Authorization: Token signature=123abc,repository="foo/bar",access=read
Usually, the Registry provides a Cookie when a Token verification succeeds. Every time the Registry passes a Cookie, you have to pass the same cookie back::
200 OK
Set-Cookie: session="wD/J7LqL5ctqw8haL10vgfhrb2Q=?foo=UydiYXInCnAxCi4=&timestamp=RjEzNjYzMTQ5NDcuNDc0NjQzCi4="; Path=/; HttpOnly
Next request::
GET /(...)
Cookie: session="wD/J7LqL5ctqw8haL10vgfhrb2Q=?foo=UydiYXInCnAxCi4=&timestamp=RjEzNjYzMTQ5NDcuNDc0NjQzCi4="


@@ -0,0 +1,15 @@
:title: docker Registry documentation
:description: Documentation for docker Registry and Registry API
:keywords: docker, registry, api, index
Registry
========
Contents:
.. toctree::
:maxdepth: 2
api

Binary files not shown: the two workflow charts referenced above were added as images (docker_pull_chart.png, 24 KiB; docker_push_chart.png, 30 KiB).


@@ -0,0 +1,9 @@
package docker
import (
"fmt"
)
func getKernelVersion() (*KernelVersionInfo, error) {
return nil, fmt.Errorf("Kernel version detection is not available on darwin")
}

getKernelVersion_linux.go (new file, 69 lines)

@@ -0,0 +1,69 @@
package docker
import (
"bytes"
"strconv"
"strings"
"syscall"
)
func getKernelVersion() (*KernelVersionInfo, error) {
var (
uts syscall.Utsname
flavor string
kernel, major, minor int
err error
)
if err := syscall.Uname(&uts); err != nil {
return nil, err
}
// Copy the fixed-size Utsname release array into a byte slice for parsing
release := make([]byte, len(uts.Release))
i := 0
for _, c := range uts.Release {
release[i] = byte(c)
i++
}
// Remove the \x00 from the release for Atoi to parse correctly
release = release[:bytes.IndexByte(release, 0)]
tmp := strings.SplitN(string(release), "-", 2)
tmp2 := strings.SplitN(tmp[0], ".", 3)
if len(tmp2) > 0 {
kernel, err = strconv.Atoi(tmp2[0])
if err != nil {
return nil, err
}
}
if len(tmp2) > 1 {
major, err = strconv.Atoi(tmp2[1])
if err != nil {
return nil, err
}
}
if len(tmp2) > 2 {
minor, err = strconv.Atoi(tmp2[2])
if err != nil {
return nil, err
}
}
if len(tmp) == 2 {
flavor = tmp[1]
} else {
flavor = ""
}
return &KernelVersionInfo{
Kernel: kernel,
Major: major,
Minor: minor,
Flavor: flavor,
}, nil
}


@@ -2,6 +2,7 @@ package docker
import (
"fmt"
"io"
"io/ioutil"
"os"
"path"
@@ -83,18 +84,24 @@ func (graph *Graph) Get(name string) (*Image, error) {
}
// Create creates a new image and registers it in the graph.
-func (graph *Graph) Create(layerData Archive, container *Container, comment, author string) (*Image, error) {
+func (graph *Graph) Create(layerData Archive, container *Container, comment, author string, config *Config) (*Image, error) {
img := &Image{
Id: GenerateId(),
Comment: comment,
Created: time.Now(),
DockerVersion: VERSION,
Author: author,
Config: config,
}
if container != nil {
img.Parent = container.Image
img.Container = container.Id
img.ContainerConfig = *container.Config
if config == nil {
if parentImage, err := graph.Get(container.Image); err == nil && parentImage != nil {
img.Config = parentImage.Config
}
}
}
if err := graph.Register(layerData, img); err != nil {
return nil, err
@@ -129,12 +136,32 @@ func (graph *Graph) Register(layerData Archive, img *Image) error {
return nil
}
// TempLayerArchive creates a temporary archive of the given image's filesystem layer.
// The archive is stored on disk and will be automatically deleted as soon as it has been read.
// If output is not nil, a human-readable progress bar will be written to it.
// FIXME: does this belong in Graph? How about MktempFile, let the caller use it for archives?
func (graph *Graph) TempLayerArchive(id string, compression Compression, output io.Writer) (*TempArchive, error) {
image, err := graph.Get(id)
if err != nil {
return nil, err
}
tmp, err := graph.tmp()
if err != nil {
return nil, err
}
archive, err := image.TarLayer(compression)
if err != nil {
return nil, err
}
return NewTempArchive(ProgressReader(ioutil.NopCloser(archive), 0, output, "Buffering to disk %v/%v (%v)"), tmp.Root)
}
// Mktemp creates a temporary sub-directory inside the graph's filesystem.
func (graph *Graph) Mktemp(id string) (string, error) {
if id == "" {
id = GenerateId()
}
-tmp, err := NewGraph(path.Join(graph.Root, ":tmp:"))
+tmp, err := graph.tmp()
if err != nil {
return "", fmt.Errorf("Couldn't create temp: %s", err)
}
@@ -144,6 +171,10 @@ func (graph *Graph) Mktemp(id string) (string, error) {
return tmp.imageRoot(id), nil
}
func (graph *Graph) tmp() (*Graph, error) {
return NewGraph(path.Join(graph.Root, ":tmp:"))
}
// Check if given error is "not empty".
// Note: this is the way golang does it internally with os.IsNotExists.
func isNotEmpty(err error) bool {


@@ -62,7 +62,7 @@ func TestGraphCreate(t *testing.T) {
if err != nil {
t.Fatal(err)
}
image, err := graph.Create(archive, nil, "Testing", "")
image, err := graph.Create(archive, nil, "Testing", "", nil)
if err != nil {
t.Fatal(err)
}
@@ -122,7 +122,7 @@ func TestMount(t *testing.T) {
if err != nil {
t.Fatal(err)
}
image, err := graph.Create(archive, nil, "Testing", "")
image, err := graph.Create(archive, nil, "Testing", "", nil)
if err != nil {
t.Fatal(err)
}
@@ -166,7 +166,7 @@ func createTestImage(graph *Graph, t *testing.T) *Image {
if err != nil {
t.Fatal(err)
}
img, err := graph.Create(archive, nil, "Test image", "")
img, err := graph.Create(archive, nil, "Test image", "", nil)
if err != nil {
t.Fatal(err)
}
@@ -181,7 +181,7 @@ func TestDelete(t *testing.T) {
t.Fatal(err)
}
assertNImages(graph, t, 0)
img, err := graph.Create(archive, nil, "Bla bla", "")
img, err := graph.Create(archive, nil, "Bla bla", "", nil)
if err != nil {
t.Fatal(err)
}
@@ -192,11 +192,11 @@ func TestDelete(t *testing.T) {
assertNImages(graph, t, 0)
// Test 2 create (same name) / 1 delete
-img1, err := graph.Create(archive, nil, "Testing", "")
+img1, err := graph.Create(archive, nil, "Testing", "", nil)
if err != nil {
t.Fatal(err)
}
-if _, err = graph.Create(archive, nil, "Testing", ""); err != nil {
+if _, err = graph.Create(archive, nil, "Testing", "", nil); err != nil {
t.Fatal(err)
}
assertNImages(graph, t, 2)


@@ -1 +0,0 @@
This directory contains material helpful for hacking on docker.

hack/README.rst (new file, 27 lines)

@@ -0,0 +1,27 @@
This directory contains material helpful for hacking on docker.
make hack
=========
Set up an Ubuntu 13.04 virtual machine for developers, including kernel 3.8
and buildbot. The environment is set up in a way that can be used through
the usual go workflow and/or the root Makefile. You can either edit on
your host, or inside the VM (using make ssh-dev), and run and test docker
inside the VM.
dependencies: vagrant, virtualbox packages and python package requests
Buildbot
~~~~~~~~
Buildbot is a continuous integration system designed to automate the
build/test cycle. By automatically rebuilding and testing the tree each time
something has changed, build problems are pinpointed quickly, before other
developers are inconvenienced by the failure.
Running 'make hack' at the docker root directory spawns a virtual
machine in the background running a buildbot instance and adds a git
post-commit hook that automatically runs docker tests for you.
You can check your buildbot instance at http://192.168.33.21:8010/waterfall

hack/Vagrantfile (vendored, new file, 35 lines)

@@ -0,0 +1,35 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
BOX_NAME = "ubuntu-dev"
BOX_URI = "http://cloud-images.ubuntu.com/raring/current/raring-server-cloudimg-vagrant-amd64-disk1.box"
VM_IP = "192.168.33.21"
USER = "vagrant"
GOPATH = "/data/docker"
DOCKER_PATH = "#{GOPATH}/src/github.com/dotcloud/docker"
CFG_PATH = "#{DOCKER_PATH}/hack/environment"
BUILDBOT_PATH = "/data/buildbot"
Vagrant::Config.run do |config|
# Setup virtual machine box
config.vm.box = BOX_NAME
config.vm.box_url = BOX_URI
config.vm.share_folder "v-data", DOCKER_PATH, "#{File.dirname(__FILE__)}/.."
config.vm.network :hostonly, VM_IP
# Stop if deployment has been done
config.vm.provision :shell, :inline => "[ ! -f /usr/bin/git ]"
# Touch for makefile
pkg_cmd = "touch #{DOCKER_PATH}; "
# Install docker dependencies
pkg_cmd << "export DEBIAN_FRONTEND=noninteractive; apt-get -qq update; " \
"apt-get install -q -y lxc bsdtar git golang make linux-image-extra-3.8.0-19-generic; " \
"chown -R #{USER}.#{USER} #{GOPATH}; " \
"install -m 0664 #{CFG_PATH}/bash_profile /home/#{USER}/.bash_profile"
config.vm.provision :shell, :inline => pkg_cmd
# Deploy buildbot CI
pkg_cmd = "apt-get install -q -y python-dev python-pip supervisor; " \
"pip install -r #{CFG_PATH}/requirements.txt; " \
"chown #{USER}.#{USER} /data; cd /data; " \
"#{CFG_PATH}/setup.sh #{USER} #{GOPATH} #{DOCKER_PATH} #{CFG_PATH} #{BUILDBOT_PATH}"
config.vm.provision :shell, :inline => pkg_cmd
end


@@ -0,0 +1,23 @@
# This will build a container capable of producing an official binary build of docker and
# uploading it to S3
maintainer Solomon Hykes <solomon@dotcloud.com>
from ubuntu:12.10
run apt-get update
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q s3cmd
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q curl
# Packages required to checkout and build docker
run curl -s -o /go.tar.gz https://go.googlecode.com/files/go1.0.3.linux-amd64.tar.gz
run tar -C /usr/local -xzf /go.tar.gz
run echo "export PATH=$PATH:/usr/local/go/bin" > /.bashrc
run echo "export PATH=$PATH:/usr/local/go/bin" > /.bash_profile
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q git
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q build-essential
# Packages required to build an ubuntu package
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q debhelper
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q autotools-dev
copy fake_initctl /usr/local/bin/initctl
run apt-get install -y -q devscripts
add . /src
run cp /src/dockerbuilder /usr/local/bin/ && chmod +x /usr/local/bin/dockerbuilder
run cp /src/s3cfg /.s3cfg
cmd ["dockerbuilder"]


@@ -0,0 +1,40 @@
#!/bin/sh
set -x
set -e
export PATH=$PATH:/usr/local/go/bin
PACKAGE=github.com/dotcloud/docker
if [ $# -gt 1 ]; then
echo "Usage: $0 [REVISION]"
exit 1
fi
export REVISION=$1
if [ -z "$AWS_ID" ]; then
echo "Warning: environment variable AWS_ID is not set. Won't upload to S3."
fi
if [ -z "$AWS_KEY" ]; then
echo "Warning: environment variable AWS_KEY is not set. Won't upload to S3."
fi
if [ -z "$GPG_KEY" ]; then
echo "Warning: environment variable GPG_KEY is not set. Ubuntu package upload will not succeed."
NO_UBUNTU=1
fi
rm -fr docker-release
git clone https://github.com/dotcloud/docker docker-release
cd docker-release
if [ -z "$REVISION" ]; then
make release
else
make release RELEASE_VERSION=$REVISION
fi
if [ -z "$NO_UBUNTU" ]; then
(cd packaging/ubuntu && make ubuntu)
fi
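A sketch of running it for a tagged release; every value below, including the revision, is a placeholder:
# credentials are read from the environment (see the checks above)
export AWS_ID=<access-key-id> AWS_KEY=<secret-key> GPG_KEY=<gpg-key-id>
dockerbuilder v0.2.2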


@@ -0,0 +1,3 @@
#!/bin/sh
echo Whatever you say, man

hack/dockerbuilder/s3cfg (new file, 3 lines)

@@ -0,0 +1,3 @@
[default]
access_key = $AWS_ID
secret_key = $AWS_KEY


@@ -0,0 +1 @@
Files used to set up the developer virtual machine


@@ -0,0 +1,19 @@
# ~/.bash_profile : executed by the command interpreter for login shells.
# if running bash
if [ -n "$BASH_VERSION" ]; then
# include .bashrc if it exists
if [ -f "$HOME/.bashrc" ]; then
. "$HOME/.bashrc"
fi
fi
# set PATH so it includes user's private bin if it exists
[ -d "$HOME/bin" ] && PATH="$HOME/bin:$PATH"
docker=/data/docker/src/github.com/dotcloud/docker
[ -d $docker ] && cd $docker
export GOPATH=/data/docker
export PATH=$PATH:$GOPATH/bin


@@ -13,8 +13,8 @@ TEST_USER = 'buildbot' # Credential to authenticate build triggers
TEST_PWD = 'docker' # Credential to authenticate build triggers
BUILDER_NAME = 'docker'
BUILDPASSWORD = 'pass-docker' # Credential to authenticate buildworkers
-DOCKER_PATH = '/data/docker'
+GOPATH = '/data/docker'
+DOCKER_PATH = '{0}/src/github.com/dotcloud/docker'.format(GOPATH)
c = BuildmasterConfig = {}
@@ -28,10 +28,7 @@ c['slavePortnum'] = PORT_MASTER
c['schedulers'] = [ForceScheduler(name='trigger',builderNames=[BUILDER_NAME])]
# Docker test command
test_cmd = """(
cd {0}/..; rm -rf docker-tmp; git clone docker docker-tmp;
cd docker-tmp; make test; exit_status=$?;
cd ..; rm -rf docker-tmp; exit $exit_status)""".format(DOCKER_PATH)
test_cmd = "GOPATH={0} make -C {1} test".format(GOPATH,DOCKER_PATH)
# Builder
factory = BuildFactory()


@@ -0,0 +1,6 @@
sqlalchemy<=0.7.9
sqlalchemy-migrate>=0.7.2
buildbot==0.8.7p1
buildbot_slave==0.8.7p1
nose==1.2.1
requests==1.1.0

hack/environment/setup.sh (new executable file, 45 lines)

@@ -0,0 +1,45 @@
#!/bin/bash
# Setup of buildbot configuration. Package installation is being done by
# Vagrantfile
# Dependencies: buildbot, buildbot-slave, supervisor
USER=$1
GOPATH=$2
DOCKER_PATH=$3
CFG_PATH=$4
BUILDBOT_PATH=$5
SLAVE_NAME="buildworker"
SLAVE_SOCKET="localhost:9989"
BUILDBOT_PWD="pass-docker"
IP=$(sed -nE 's/VM_IP = "(.+)"/\1/p' ${DOCKER_PATH}/hack/Vagrantfile)
export PATH="/bin:sbin:/usr/bin:/usr/sbin:/usr/local/bin"
function run { su $USER -c "$1"; }
# Exit if buildbot has already been installed
[ -d "$BUILDBOT_PATH" ] && exit 0
# Setup buildbot
run "mkdir -p $BUILDBOT_PATH"
cd $BUILDBOT_PATH
run "buildbot create-master master"
run "cp $CFG_PATH/master.cfg master"
run "sed -i 's/localhost/$IP/' master/master.cfg"
run "sed -i -E 's#(GOPATH = ).+#\1\"$GOPATH\"#' master/master.cfg"
run "sed -i -E 's#(DOCKER_PATH = ).+#\1\"$DOCKER_PATH\"#' master/master.cfg"
run "buildslave create-slave slave $SLAVE_SOCKET $SLAVE_NAME $BUILDBOT_PWD"
# Allow buildbot subprocesses (docker tests) to properly run in containers,
# in particular with docker -u
run "sed -i 's/^umask = None/umask = 000/' slave/buildbot.tac"
# Setup supervisor
cp $CFG_PATH/buildbot.conf /etc/supervisor/conf.d/buildbot.conf
sed -i -E "s/^chmod=0700.+/chmod=0770\nchown=root:$USER/" /etc/supervisor/supervisord.conf
kill -HUP $(pgrep -f "/usr/bin/python /usr/bin/supervisord")
# Add git hook
cp $CFG_PATH/post-commit $DOCKER_PATH/.git/hooks
sed -i "s/localhost/$IP/" $DOCKER_PATH/.git/hooks/post-commit


@@ -24,6 +24,7 @@ type Image struct {
ContainerConfig Config `json:"container_config,omitempty"`
DockerVersion string `json:"docker_version,omitempty"`
Author string `json:"author,omitempty"`
Config *Config `json:"config,omitempty"`
graph *Graph
}
@@ -92,7 +93,7 @@ func MountAUFS(ro []string, rw string, target string) error {
rwBranch := fmt.Sprintf("%v=rw", rw)
roBranches := ""
for _, layer := range ro {
roBranches += fmt.Sprintf("%v=ro:", layer)
roBranches += fmt.Sprintf("%v=ro+wh:", layer)
}
branches := fmt.Sprintf("br:%v:%v", rwBranch, roBranches)
@@ -110,6 +111,15 @@ func MountAUFS(ro []string, rw string, target string) error {
return nil
}
// TarLayer returns a tar archive of the image's filesystem layer.
func (image *Image) TarLayer(compression Compression) (Archive, error) {
layerPath, err := image.layer()
if err != nil {
return nil, err
}
return Tar(layerPath, compression)
}
func (image *Image) Mount(root, rw string) error {
if mounted, err := Mounted(root); err != nil {
return err
@@ -127,34 +137,9 @@ func (image *Image) Mount(root, rw string) error {
if err := os.Mkdir(rw, 0755); err != nil && !os.IsExist(err) {
return err
}
-// FIXME: @creack shouldn't we do this after going over changes?
if err := MountAUFS(layers, rw, root); err != nil {
return err
}
-// FIXME: Create tests for deletion
-// FIXME: move this part to change.go
-// Retrieve the changeset from the parent and apply it to the container
-// - Retrieve the changes
-changes, err := Changes(layers, layers[0])
-if err != nil {
-return err
-}
-// Iterate on changes
-for _, c := range changes {
-// If there is a delete
-if c.Kind == ChangeDelete {
-// Make sure the directory exists
-file_path, file_name := path.Dir(c.Path), path.Base(c.Path)
-if err := os.MkdirAll(path.Join(rw, file_path), 0755); err != nil {
-return err
-}
-// And create the whiteout (we just need to create empty file, discard the return)
-if _, err := os.Create(path.Join(path.Join(rw, file_path),
-".wh."+path.Base(file_name))); err != nil {
-return err
-}
-}
-}
return nil
}


@@ -79,7 +79,11 @@ lxc.mount.entry = {{.SysInitPath}} {{$ROOTFS}}/sbin/init none bind,ro 0 0
# In order to get a working DNS environment, mount bind (ro) the host's /etc/resolv.conf into the container
lxc.mount.entry = {{.ResolvConfPath}} {{$ROOTFS}}/etc/resolv.conf none bind,ro 0 0
{{if .Volumes}}
{{range $virtualPath, $realPath := .GetVolumes}}
lxc.mount.entry = {{$realPath}} {{$ROOTFS}}/{{$virtualPath}} none bind,rw 0 0
{{end}}
{{end}}
# drop linux capabilities (apply mainly to the user root in the container)
lxc.cap.drop = audit_control audit_write mac_admin mac_override mknod setfcap setpcap sys_admin sys_boot sys_module sys_nice sys_pacct sys_rawio sys_resource sys_time sys_tty_config


@@ -0,0 +1,25 @@
Docker on Arch
==============
The AUR lxc-docker and lxc-docker-git packages handle building docker on Arch
Linux. The PKGBUILD specifies all dependencies, build, and packaging steps.
Dependencies
============
The only buildtime dependencies are git and go which are available via pacman.
The -s flag can be used on makepkg commands below to automatically install
these dependencies.
Building Package
================
Download the tarball for either AUR package to a local directory. In that
directory makepkg can be run to build the package.
# Build the binary package
makepkg
# Build an updated source tarball
makepkg --source


@@ -1,30 +1,146 @@
lxc-docker (0.2.2-1) precise; urgency=low
- Support for data volumes ('docker run -v=PATH')
- Share data volumes between containers ('docker run -volumes-from')
- Improved documentation
- Upgrade to Go 1.0.3
- Various upgrades to the dev environment for contributors
-- dotCloud <ops@dotcloud.com> Fri, 3 May 2013 00:00:00 -0700
lxc-docker (0.2.1-1) precise; urgency=low
- 'docker commit -run' bundles a layer with default runtime options: command, ports etc.
- Improve install process on Vagrant
- New Dockerfile operation: "maintainer"
- New Dockerfile operation: "expose"
- New Dockerfile operation: "cmd"
- Contrib script to build a Debian base layer
- 'docker -d -r': restart crashed containers at daemon startup
- Runtime: improve test coverage
-- dotCloud <ops@dotcloud.com> Wed, 1 May 2013 00:00:00 -0700
lxc-docker (0.2.0-1) precise; urgency=low
- Runtime: ghost containers can be killed and waited for
- Documentation: update install instructions
- Packaging: fix Vagrantfile
- Development: automate releasing binaries and ubuntu packages
- Add a changelog
- Various bugfixes
-- dotCloud <ops@dotcloud.com> Mon, 23 Apr 2013 00:00:00 -0700
lxc-docker (0.1.8-1) precise; urgency=low
- Dynamically detect cgroup capabilities
- Issue stability warning on kernels <3.8
- 'docker push' buffers on disk instead of memory
- Fix 'docker diff' for removed files
- Fix 'docker stop' for ghost containers
- Fix handling of pidfile
- Various bugfixes and stability improvements
-- dotCloud <ops@dotcloud.com> Mon, 22 Apr 2013 00:00:00 -0700
lxc-docker (0.1.7-1) precise; urgency=low
- Container ports are available on localhost
- 'docker ps' shows allocated TCP ports
- Contributors can run 'make hack' to start a continuous integration VM
- Streamline ubuntu packaging & uploading
- Various bugfixes and stability improvements
-- dotCloud <ops@dotcloud.com> Thu, 18 Apr 2013 00:00:00 -0700
lxc-docker (0.1.6-1) precise; urgency=low
Improvements [+], Updates [*], Bug fixes [-]:
+ Multiple improvements, updates and bug fixes
- Record the author of an image with 'docker commit -author'
-- dotCloud <ops@dotcloud.com> Wed, 17 Apr 2013 20:43:43 -0700
-- dotCloud <ops@dotcloud.com> Wed, 17 Apr 2013 00:00:00 -0700
lxc-docker (0.1.4.1-1) precise; urgency=low
lxc-docker (0.1.5-1) precise; urgency=low
Improvements [+], Updates [*], Bug fixes [-]:
* Test PPA
- Disable standalone mode
- Use a custom DNS resolver with 'docker -d -dns'
- Detect ghost containers
- Improve diagnosis of missing system capabilities
- Allow disabling memory limits at compile time
- Add debian packaging
- Documentation: installing on Arch Linux
- Documentation: running Redis on docker
- Fixed lxc 0.9 compatibility
- Automatically load aufs module
- Various bugfixes and stability improvements
-- dotCloud <ops@dotcloud.com> Mon, 15 Apr 2013 12:14:50 -0700
-- dotCloud <ops@dotcloud.com> Wed, 17 Apr 2013 00:00:00 -0700
lxc-docker (0.1.4-1) precise; urgency=low
Improvements [+], Updates [*], Bug fixes [-]:
* Changed default bridge interface to 'docker0'
- Fix a race condition when running the port allocator
- Full support for TTY emulation
- Detach from a TTY session with the escape sequence `C-p C-q`
- Various bugfixes and stability improvements
- Minor UI improvements
- Automatically create our own bridge interface 'docker0'
-- dotCloud <ops@dotcloud.com> Fri, 12 Apr 2013 12:20:06 -0700
-- dotCloud <ops@dotcloud.com> Tue, 9 Apr 2013 00:00:00 -0700
lxc-docker (0.1.0-1) unstable; urgency=low
lxc-docker (0.1.3-1) precise; urgency=low
* Initial release
- Choose TCP frontend port with '-p :PORT'
- Layer format is versioned
- Major reliability improvements to the process manager
- Various bugfixes and stability improvements
-- dotCloud <ops@dotcloud.com> Mon, 25 Mar 2013 05:51:12 -0700
-- dotCloud <ops@dotcloud.com> Thu, 4 Apr 2013 00:00:00 -0700
lxc-docker (0.1.2-1) precise; urgency=low
- Set container hostname with 'docker run -h'
- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]'
- Various bugfixes and stability improvements
- UI polish
- Progress bar on push/pull
- Use XZ compression by default
- Make IP allocator lazy
-- dotCloud <ops@dotcloud.com> Wed, 3 Apr 2013 00:00:00 -0700
lxc-docker (0.1.1-1) precise; urgency=low
- Display shorthand IDs for convenience
- Stabilize process management
- Layers can include a commit message
- Simplified 'docker attach'
- Fixed support for re-attaching
- Various bugfixes and stability improvements
- Auto-download at run
- Auto-login on push
- Beefed up documentation
-- dotCloud <ops@dotcloud.com> Sun, 31 Mar 2013 00:00:00 -0700
lxc-docker (0.1.0-1) precise; urgency=low
- First release
- Implement registry in order to push/pull images
- TCP port allocation
- Fix termcaps on Linux
- Add documentation
- Add Vagrant support with Vagrantfile
- Add unit tests
- Add repository/tags to ease image management
- Improve the layer implementation
-- dotCloud <ops@dotcloud.com> Sat, 23 Mar 2013 00:00:00 -0700

View File

@@ -15,9 +15,12 @@ accessed adding the following line to /etc/apt/sources.list ::
Releasing a new package
~~~~~~~~~~~~~~~~~~~~~~~
The most relevant information to update is the changelog file:
The most relevant information to update is the packaging/ubuntu/changelog file:
Each new release should create a new first paragraph with new release version,
changes, and the maintainer information.
changes, and the maintainer information. The core of this paragraph is
located in CHANGELOG.md. Make sure to transcribe it and translate the formats
(e.g.: packaging/ubuntu/changelog uses 2 spaces for body change descriptions
instead of the 1 space used in CHANGELOG.md)
Assuming your PPA GPG signing key is on /media/usbdrive/docker.key, load it
into the GPG_KEY environment variable with::
@@ -28,8 +31,9 @@ into the GPG_KEY environment variable with::
After this is done and you are ready to upload the package to the PPA, you have
a couple of choices:
* Follow README.debian to generate the actual source packages and upload them
to the PPA
* Follow packaging/ubuntu/README.ubuntu to generate the actual source packages
and upload them to the PPA
* Let vagrant do all the work for you::
( cd docker/packaging/ubuntu; vagrant up )

View File

@@ -1,17 +0,0 @@
node default {
exec {
"apt_update" :
command => "/usr/bin/apt-get update"
}
Package {
require => Exec['apt_update']
}
group { "puppet":
ensure => "present"
}
include "docker"
}

View File

@@ -1,99 +0,0 @@
class virtualbox {
Package { ensure => "installed" }
# remove some files from the base vagrant image because they're old
file { "/home/vagrant/docker-master":
ensure => absent,
recurse => true,
force => true,
purge => true,
}
file { "/usr/local/bin/dockerd":
ensure => absent,
}
file { "/usr/local/bin/docker":
ensure => absent,
}
# Set up VirtualBox guest utils
package { "virtualbox-guest-utils": }
exec { "vbox-add" :
command => "/etc/init.d/vboxadd setup",
require => [
Package["virtualbox-guest-utils"],
Package["linux-headers-3.5.0-25-generic"], ],
}
}
class docker {
# update this with latest go binary dist
$go_url = "http://go.googlecode.com/files/go1.0.3.linux-amd64.tar.gz"
Package { ensure => "installed" }
package { ["lxc", "debootstrap", "wget", "bsdtar", "git",
"linux-image-3.5.0-25-generic",
"linux-image-extra-3.5.0-25-generic",
"linux-headers-3.5.0-25-generic"]: }
$ec2_version = file("/etc/ec2_version", "/dev/null")
$rax_version = inline_template("<%= %x{/usr/bin/xenstore-read vm-data/provider_data/provider} %>")
if ($ec2_version) {
$vagrant_user = "ubuntu"
$vagrant_home = "/home/ubuntu"
} elsif ($rax_version) {
$vagrant_user = "root"
$vagrant_home = "/root"
} else {
# virtualbox is the vagrant default, so it should be safe to assume
$vagrant_user = "vagrant"
$vagrant_home = "/home/vagrant"
include virtualbox
}
exec { "fetch-go":
require => Package["wget"],
command => "/usr/bin/wget -O - $go_url | /bin/tar xz -C /usr/local",
creates => "/usr/local/go/bin/go",
}
file { "/etc/init/dockerd.conf":
mode => 600,
owner => "root",
group => "root",
content => template("docker/dockerd.conf"),
}
file { "/opt/go":
owner => $vagrant_user,
group => $vagrant_user,
recurse => true,
}
file { "${vagrant_home}/.profile":
mode => 644,
owner => $vagrant_user,
group => $vagrant_user,
content => template("docker/profile"),
}
exec { "build-docker" :
cwd => "/opt/go/src/github.com/dotcloud/docker",
user => $vagrant_user,
environment => "GOPATH=/opt/go",
command => "/usr/local/go/bin/go get -v ./... && /usr/local/go/bin/go install ./docker",
creates => "/opt/go/bin/docker",
logoutput => "on_failure",
require => [ Exec["fetch-go"], File["/opt/go"] ],
}
service { "dockerd" :
ensure => "running",
start => "/sbin/initctl start dockerd",
stop => "/sbin/initctl stop dockerd",
require => [ Exec["build-docker"], File["/etc/init/dockerd.conf"] ],
name => "dockerd",
provider => "base"
}
}

View File

@@ -1,12 +0,0 @@
description "Run dockerd"
stop on runlevel [!2345]
start on runlevel [3]
# if you want it to automatically restart if it crashes, leave the next line in
respawn
script
test -f /etc/default/locale && . /etc/default/locale || true
LANG=$LANG LC_ALL=$LANG /opt/go/bin/docker -d >> /var/log/dockerd 2>&1
end script

View File

@@ -1,30 +0,0 @@
# ~/.profile: executed by the command interpreter for login shells.
# This file is not read by bash(1), if ~/.bash_profile or ~/.bash_login
# exists.
# see /usr/share/doc/bash/examples/startup-files for examples.
# the files are located in the bash-doc package.
# the default umask is set in /etc/profile; for setting the umask
# for ssh logins, install and configure the libpam-umask package.
#umask 022
# if running bash
if [ -n "$BASH_VERSION" ]; then
# include .bashrc if it exists
if [ -f "$HOME/.bashrc" ]; then
. "$HOME/.bashrc"
fi
fi
# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/bin" ] ; then
PATH="$HOME/bin:$PATH"
fi
export GOPATH=/opt/go
export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
docker=/opt/go/src/github.com/dotcloud/docker
if [ -d $docker ]; then
cd $docker
fi

View File

@@ -7,6 +7,7 @@ import (
"io"
"io/ioutil"
"net/http"
"os"
"path"
"strings"
)
@@ -135,7 +136,7 @@ func (graph *Graph) getRemoteImage(stdout io.Writer, imgId string, authConfig *a
if err != nil {
return nil, nil, err
}
return img, ProgressReader(res.Body, int(res.ContentLength), stdout), nil
return img, ProgressReader(res.Body, int(res.ContentLength), stdout, "Downloading %v/%v (%v)"), nil
}
func (graph *Graph) PullImage(stdout io.Writer, imgId string, authConfig *auth.AuthConfig) error {
@@ -269,24 +270,20 @@ func (graph *Graph) PushImage(stdout io.Writer, imgOrig *Image, authConfig *auth
return fmt.Errorf("Failed to retrieve layer upload location: %s", err)
}
// FIXME: Don't do this :D. Check the S3 requirement and implement chunks of 5MB
// FIXME2: I can't stress it enough, DON'T DO THIS! very high priority
layerData2, err := Tar(path.Join(graph.Root, img.Id, "layer"), Xz)
tmp, err := ioutil.ReadAll(layerData2)
// FIXME: stream the archive directly to the registry instead of buffering it on disk. This requires either:
// a) Implementing S3's proprietary streaming logic, or
// b) Stream directly to the registry instead of S3.
// I prefer option b, because it doesn't lock us into a proprietary cloud service.
tmpLayer, err := graph.TempLayerArchive(img.Id, Xz, stdout)
if err != nil {
return err
}
layerLength := len(tmp)
layerData, err := Tar(path.Join(graph.Root, img.Id, "layer"), Xz)
if err != nil {
return fmt.Errorf("Failed to generate layer archive: %s", err)
}
req3, err := http.NewRequest("PUT", url.String(), ProgressReader(layerData.(io.ReadCloser), layerLength, stdout))
defer os.Remove(tmpLayer.Name())
req3, err := http.NewRequest("PUT", url.String(), ProgressReader(tmpLayer, int(tmpLayer.Size), stdout, "Uploading %v/%v (%v)"))
if err != nil {
return err
}
req3.ContentLength = int64(layerLength)
req3.ContentLength = int64(tmpLayer.Size)
req3.TransferEncoding = []string{"none"}
res3, err := client.Do(req3)

View File

@@ -6,6 +6,7 @@ import (
"github.com/dotcloud/docker/auth"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
@@ -14,6 +15,11 @@ import (
"time"
)
type Capabilities struct {
MemoryLimit bool
SwapLimit bool
}
type Runtime struct {
root string
repository string
@@ -23,6 +29,10 @@ type Runtime struct {
repositories *TagStore
authConfig *auth.AuthConfig
idIndex *TruncIndex
capabilities *Capabilities
kernelVersion *KernelVersionInfo
autoRestart bool
volumes *Graph
}
var sysInitPath string
@@ -69,12 +79,58 @@ func (runtime *Runtime) containerRoot(id string) string {
return path.Join(runtime.repository, id)
}
func (runtime *Runtime) mergeConfig(userConf, imageConf *Config) {
// Fill in any field the user left unset with the default recorded in the image.
if userConf.Hostname == "" {
userConf.Hostname = imageConf.Hostname
}
if userConf.User == "" {
userConf.User = imageConf.User
}
if userConf.Memory == 0 {
userConf.Memory = imageConf.Memory
}
if userConf.MemorySwap == 0 {
userConf.MemorySwap = imageConf.MemorySwap
}
if userConf.PortSpecs == nil || len(userConf.PortSpecs) == 0 {
userConf.PortSpecs = imageConf.PortSpecs
}
if !userConf.Tty {
userConf.Tty = imageConf.Tty
}
if !userConf.OpenStdin {
userConf.OpenStdin = imageConf.OpenStdin
}
if !userConf.StdinOnce {
userConf.StdinOnce = imageConf.StdinOnce
}
if userConf.Env == nil || len(userConf.Env) == 0 {
userConf.Env = imageConf.Env
}
if userConf.Cmd == nil || len(userConf.Cmd) == 0 {
userConf.Cmd = imageConf.Cmd
}
if userConf.Dns == nil || len(userConf.Dns) == 0 {
userConf.Dns = imageConf.Dns
}
}
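// Editorial example (not part of the original diff): with the fallbacks above,
// a Config carrying only Image and Memory inherits Cmd, Env, PortSpecs, etc.
// from the image, so a plain 'docker run IMAGE' still ends up with a command
// to execute as long as the image recorded one (e.g. via a Dockerfile 'cmd').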
func (runtime *Runtime) Create(config *Config) (*Container, error) {
// Lookup image
img, err := runtime.repositories.LookupImage(config.Image)
if err != nil {
return nil, err
}
if img.Config != nil {
runtime.mergeConfig(config, img.Config)
}
if config.Cmd == nil {
return nil, fmt.Errorf("No command specified")
}
// Generate id
id := GenerateId()
// Generate default hostname
@@ -95,6 +151,7 @@ func (runtime *Runtime) Create(config *Config) (*Container, error) {
// FIXME: do we need to store this in the container?
SysInitPath: sysInitPath,
}
container.root = runtime.containerRoot(container.Id)
// Step 1: create the container directory.
// This doubles as a barrier to avoid race conditions.
@@ -159,29 +216,6 @@ func (runtime *Runtime) Register(container *Container) error {
// init the wait lock
container.waitLock = make(chan struct{})
// FIXME: if the container is supposed to be running but is not, auto restart it?
// if so, then we need to restart monitor and init a new lock
// If the container is supposed to be running, make sure of it
if container.State.Running {
if output, err := exec.Command("lxc-info", "-n", container.Id).CombinedOutput(); err != nil {
return err
} else {
if !strings.Contains(string(output), "RUNNING") {
Debugf("Container %s was supposed to be running be is not.", container.Id)
container.State.setStopped(-127)
if err := container.ToDisk(); err != nil {
return err
}
}
}
}
// If the container is not running or just has been flagged not running
// then close the wait lock chan (will be reset upon start)
if !container.State.Running {
close(container.waitLock)
}
// Even if not running, we init the lock (prevents races in start/stop/kill)
container.State.initLock()
@@ -199,6 +233,47 @@ func (runtime *Runtime) Register(container *Container) error {
// done
runtime.containers.PushBack(container)
runtime.idIndex.Add(container.Id)
// When we actually restart, Start() does the monitoring.
// However, when we simply 'reattach', we have to restart a monitor
nomonitor := false
// FIXME: if the container is supposed to be running but is not, auto restart it?
// if so, then we need to restart monitor and init a new lock
// If the container is supposed to be running, make sure of it
if container.State.Running {
if output, err := exec.Command("lxc-info", "-n", container.Id).CombinedOutput(); err != nil {
return err
} else {
if !strings.Contains(string(output), "RUNNING") {
Debugf("Container %s was supposed to be running be is not.", container.Id)
if runtime.autoRestart {
Debugf("Restarting")
container.State.Ghost = false
container.State.setStopped(0)
if err := container.Start(); err != nil {
return err
}
nomonitor = true
} else {
Debugf("Marking as stopped")
container.State.setStopped(-127)
if err := container.ToDisk(); err != nil {
return err
}
}
}
}
}
// If the container is not running or just has been flagged not running
// then close the wait lock chan (will be reset upon start)
if !container.State.Running {
close(container.waitLock)
} else if !nomonitor {
container.allocateNetwork()
go container.monitor()
}
return nil
}
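// Hedged summary of the restart logic above: a container recorded as Running
// is cross-checked against lxc-info; if lxc reports it stopped, it is either
// restarted (when the daemon was started with 'docker -d -r', i.e. autoRestart)
// or marked as stopped on disk, and the wait lock and monitor goroutine are
// set up to match the final state.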
@@ -238,7 +313,7 @@ func (runtime *Runtime) Destroy(container *Container) error {
// Commit creates a new filesystem image from the current state of a container.
// The image can optionally be tagged into a repository
func (runtime *Runtime) Commit(id, repository, tag, comment, author string) (*Image, error) {
func (runtime *Runtime) Commit(id, repository, tag, comment, author string, config *Config) (*Image, error) {
container := runtime.Get(id)
if container == nil {
return nil, fmt.Errorf("No such container: %s", id)
@@ -250,7 +325,7 @@ func (runtime *Runtime) Commit(id, repository, tag, comment, author string) (*Im
return nil, err
}
// Create a new image from the container's base layers + a new layer from container changes
img, err := runtime.graph.Create(rwTar, container, comment, author)
img, err := runtime.graph.Create(rwTar, container, comment, author, config)
if err != nil {
return nil, err
}
@@ -280,12 +355,47 @@ func (runtime *Runtime) restore() error {
return nil
}
// FIXME: harmonize with NewGraph()
func NewRuntime() (*Runtime, error) {
return NewRuntimeFromDirectory("/var/lib/docker")
func (runtime *Runtime) UpdateCapabilities(quiet bool) {
if cgroupMemoryMountpoint, err := FindCgroupMountpoint("memory"); err != nil {
if !quiet {
log.Printf("WARNING: %s\n", err)
}
} else {
_, err1 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.limit_in_bytes"))
_, err2 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.soft_limit_in_bytes"))
runtime.capabilities.MemoryLimit = err1 == nil && err2 == nil
if !runtime.capabilities.MemoryLimit && !quiet {
log.Printf("WARNING: Your kernel does not support cgroup memory limit.")
}
_, err = ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes"))
runtime.capabilities.SwapLimit = err == nil
if !runtime.capabilities.SwapLimit && !quiet {
log.Printf("WARNING: Your kernel does not support cgroup swap limit.")
}
}
}
func NewRuntimeFromDirectory(root string) (*Runtime, error) {
// FIXME: harmonize with NewGraph()
func NewRuntime(autoRestart bool) (*Runtime, error) {
runtime, err := NewRuntimeFromDirectory("/var/lib/docker", autoRestart)
if err != nil {
return nil, err
}
if k, err := GetKernelVersion(); err != nil {
log.Printf("WARNING: %s\n", err)
} else {
runtime.kernelVersion = k
if CompareKernelVersion(k, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
}
}
runtime.UpdateCapabilities(false)
return runtime, nil
}
func NewRuntimeFromDirectory(root string, autoRestart bool) (*Runtime, error) {
runtimeRepo := path.Join(root, "containers")
if err := os.MkdirAll(runtimeRepo, 0700); err != nil && !os.IsExist(err) {
@@ -296,6 +406,10 @@ func NewRuntimeFromDirectory(root string) (*Runtime, error) {
if err != nil {
return nil, err
}
volumes, err := NewGraph(path.Join(root, "volumes"))
if err != nil {
return nil, err
}
repositories, err := NewTagStore(path.Join(root, "repositories"), g)
if err != nil {
return nil, fmt.Errorf("Couldn't create Tag store: %s", err)
@@ -321,6 +435,9 @@ func NewRuntimeFromDirectory(root string) (*Runtime, error) {
repositories: repositories,
authConfig: authConfig,
idIndex: NewTruncIndex(),
capabilities: &Capabilities{},
autoRestart: autoRestart,
volumes: volumes,
}
if err := runtime.restore(); err != nil {

View File

@@ -48,8 +48,6 @@ func layerArchive(tarfile string) (io.Reader, error) {
}
func init() {
NO_MEMORY_LIMIT = os.Getenv("NO_MEMORY_LIMIT") == "1"
// Hack to run sys init during unit testing
if SelfPath() == "/sbin/init" {
SysInit()
@@ -62,8 +60,10 @@ func init() {
panic("docker tests needs to be run as root")
}
NetworkBridgeIface = "testdockbr0"
// Make it our Store root
runtime, err := NewRuntimeFromDirectory(unitTestStoreBase)
runtime, err := NewRuntimeFromDirectory(unitTestStoreBase, false)
if err != nil {
panic(err)
}
@@ -89,11 +89,11 @@ func newTestRuntime() (*Runtime, error) {
return nil, err
}
runtime, err := NewRuntimeFromDirectory(root)
runtime, err := NewRuntimeFromDirectory(root, false)
if err != nil {
return nil, err
}
runtime.UpdateCapabilities(true)
return runtime, nil
}
@@ -275,7 +275,16 @@ func TestAllocatePortLocalhost(t *testing.T) {
t.Fatal(err)
}
defer container.Kill()
time.Sleep(300 * time.Millisecond) // Wait for the container to run
setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
for {
if container.State.Running {
break
}
time.Sleep(10 * time.Millisecond)
}
})
conn, err := net.Dial("tcp",
fmt.Sprintf(
"localhost:%s", container.NetworkSettings.PortMapping["5555"],
@@ -295,6 +304,7 @@ func TestAllocatePortLocalhost(t *testing.T) {
string(output),
)
}
container.Wait()
}
func TestRestore(t *testing.T) {
@@ -310,7 +320,7 @@ func TestRestore(t *testing.T) {
t.Fatal(err)
}
runtime1, err := NewRuntimeFromDirectory(root)
runtime1, err := NewRuntimeFromDirectory(root, false)
if err != nil {
t.Fatal(err)
}
@@ -369,7 +379,7 @@ func TestRestore(t *testing.T) {
// Here we are simulating a docker restart - that is, reloading all containers
// from scratch
runtime2, err := NewRuntimeFromDirectory(root)
runtime2, err := NewRuntimeFromDirectory(root, false)
if err != nil {
t.Fatal(err)
}

View File

@@ -69,23 +69,30 @@ type progressReader struct {
readTotal int // Expected stream length (bytes)
readProgress int // How much has been read so far (bytes)
lastUpdate int // How many bytes read at the last update
template string // Template to print. Default "%v/%v (%v)"
}
func (r *progressReader) Read(p []byte) (n int, err error) {
read, err := io.ReadCloser(r.reader).Read(p)
r.readProgress += read
// Only update progress for every 1% read
updateEvery := int(0.01 * float64(r.readTotal))
if r.readProgress-r.lastUpdate > updateEvery || r.readProgress == r.readTotal {
fmt.Fprintf(r.output, "%d/%d (%.0f%%)\r",
r.readProgress,
r.readTotal,
float64(r.readProgress)/float64(r.readTotal)*100)
updateEvery := 4096
if r.readTotal > 0 {
// Only update progress for every 1% read
if increment := int(0.01 * float64(r.readTotal)); increment > updateEvery {
updateEvery = increment
}
}
if r.readProgress-r.lastUpdate > updateEvery || err != nil {
if r.readTotal > 0 {
fmt.Fprintf(r.output, r.template+"\r", r.readProgress, r.readTotal, fmt.Sprintf("%.0f%%", float64(r.readProgress)/float64(r.readTotal)*100))
} else {
fmt.Fprintf(r.output, r.template+"\r", r.readProgress, "?", "n/a")
}
r.lastUpdate = r.readProgress
}
// Send newline when complete
if err == io.EOF {
if err != nil {
fmt.Fprintf(r.output, "\n")
}
@@ -94,8 +101,11 @@ func (r *progressReader) Read(p []byte) (n int, err error) {
func (r *progressReader) Close() error {
return io.ReadCloser(r.reader).Close()
}
func ProgressReader(r io.ReadCloser, size int, output io.Writer) *progressReader {
return &progressReader{r, output, size, 0, 0}
func ProgressReader(r io.ReadCloser, size int, output io.Writer, template string) *progressReader {
if template == "" {
template = "%v/%v (%v)"
}
return &progressReader{r, output, size, 0, 0, template}
}
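// Usage sketch (hypothetical caller, not part of this diff): wrap any
// io.ReadCloser so that each Read is reported with the chosen template:
//
// body := ProgressReader(res.Body, int(res.ContentLength), os.Stdout, "Downloading %v/%v (%v)")
// defer body.Close()
// _, err := io.Copy(ioutil.Discard, body)
//
// With an unknown size (readTotal <= 0) the reader prints "?" and "n/a" instead.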
// HumanDuration returns a human-readable approximation of a duration
@@ -384,3 +394,65 @@ func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error)
}
return written, err
}
type KernelVersionInfo struct {
Kernel int
Major int
Minor int
Flavor string
}
// FIXME: this doesn't build on Darwin
func GetKernelVersion() (*KernelVersionInfo, error) {
return getKernelVersion()
}
func (k *KernelVersionInfo) String() string {
return fmt.Sprintf("%d.%d.%d-%s", k.Kernel, k.Major, k.Minor, k.Flavor)
}
// Compare two KernelVersionInfo structs.
// Returns -1 if a < b, 0 if a == b, 1 if a > b
func CompareKernelVersion(a, b *KernelVersionInfo) int {
if a.Kernel < b.Kernel {
return -1
} else if a.Kernel > b.Kernel {
return 1
}
if a.Major < b.Major {
return -1
} else if a.Major > b.Major {
return 1
}
if a.Minor < b.Minor {
return -1
} else if a.Minor > b.Minor {
return 1
}
return 0
}
func FindCgroupMountpoint(cgroupType string) (string, error) {
output, err := ioutil.ReadFile("/proc/mounts")
if err != nil {
return "", err
}
// /proc/mounts has 6 fields per line, one mount per line, e.g.
// cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0
for _, line := range strings.Split(string(output), "\n") {
parts := strings.Split(line, " ")
// Guard against short or empty lines (e.g. the trailing newline) before indexing.
if len(parts) < 4 {
continue
}
if parts[2] == "cgroup" {
for _, opt := range strings.Split(parts[3], ",") {
if opt == cgroupType {
return parts[1], nil
}
}
}
}
return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType)
}
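// Usage sketch (hedged; mirrors how UpdateCapabilities uses it above):
//
// mountpoint, err := FindCgroupMountpoint("memory")
// if err != nil {
// return err
// }
// data, err := ioutil.ReadFile(path.Join(mountpoint, "memory.limit_in_bytes"))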

View File

@@ -228,3 +228,36 @@ func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult strin
t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult)
}
}
func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) {
if r := CompareKernelVersion(a, b); r != result {
t.Fatalf("Unepected kernel version comparaison result. Found %d, expected %d", r, result)
}
}
func TestCompareKernelVersion(t *testing.T) {
assertKernelVersion(t,
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
0)
assertKernelVersion(t,
&KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0},
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
-1)
assertKernelVersion(t,
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
&KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0},
1)
assertKernelVersion(t,
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "0"},
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "16"},
0)
assertKernelVersion(t,
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 5},
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
1)
assertKernelVersion(t,
&KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20, Flavor: "25"},
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "0"},
-1)
}