Compare commits

109 Commits

Author SHA1 Message Date
Solomon Hykes
90668a8a99 Bumped version to 0.2.0 2013-04-23 23:15:09 -07:00
Solomon Hykes
c7fd84b8a0 Merge branch 'master' of ssh://github.com/dotcloud/docker 2013-04-23 23:05:38 -07:00
Solomon Hykes
874a40ed3a - Dev: dockerbuilder requires a fake initctl because 'apt-get install devscripts' insists on installing a stupid daemon I never asked for in the first place. 2013-04-23 23:04:54 -07:00
Solomon Hykes
370fafacbf Merge branch 'master' of ssh://github.com/dotcloud/docker 2013-04-23 22:57:50 -07:00
Solomon Hykes
a0478f726d dockerbuilder: upload most recent Ubuntu package (note version FOO might not yet be packaged at tag vFOO) 2013-04-23 22:57:34 -07:00
Solomon Hykes
e5bc5a2e31 Merge pull request #427 from dhrp/docs
- Packaging: Fixed Vagrantfile
* Documentation: Updated install instructions
2013-04-23 19:49:28 -07:00
Solomon Hykes
25fc3a7e76 Merge pull request #470 from dotcloud/packaging-ubuntu
* Packaging: Add 0.1.8 to Ubuntu packaging changelog
* Packaging: Update the Ubuntu maintainer manual
2013-04-23 19:46:56 -07:00
Solomon Hykes
b3ab0b561e Makefile improvements
+ Convenience rules: srcrelease, deps
	- Separate dependency vendoring from building the binary
	  (re-download dependencies with 'make deps')
2013-04-23 19:41:38 -07:00
Solomon Hykes
8b8c8bf7cb Fix 'make release RELEASE_VERSION=master' 2013-04-23 18:50:53 -07:00
Solomon Hykes
a8651a23b2 make release: build a binary release of the most recent version tag 2013-04-23 18:32:59 -07:00
Daniel Mizyrycki
f744cfd5a7 packaging-ubuntu: update maintainer documentation for changelog file 2013-04-23 13:51:03 -07:00
Solomon Hykes
e03b241fb1 dockerbuilder: build with 'make; cp -R ./bin' 2013-04-23 12:07:54 -07:00
Thatcher Peskens
1ddca1948b Fixed remaining issues and conflicts created by last merge. 2013-04-23 12:04:53 -07:00
Solomon Hykes
2485bb2cd2 dockerbuilder: use a pristine GOPATH, with the fresh checkout registered at the right path (for internal submodules) 2013-04-23 11:45:47 -07:00
Solomon Hykes
7577f48dc4 dockerbuilder: build in current directory instead /go and /tmp 2013-04-23 10:53:02 -07:00
Solomon Hykes
0512cf9c83 dockerbuilder: /usr/local/bin is already set by docker 2013-04-23 10:49:58 -07:00
Solomon Hykes
73da7a12e7 Increased timeout in TCP port allocation test to pass on slower machines 2013-04-23 10:12:46 -07:00
Solomon Hykes
50f5723f1d Merge pull request #465 from shamrin/patch-2
- Documentation: fixed typo in "Building blocks"
2013-04-23 09:02:19 -07:00
Alexey Shamrin
cbc4eccd50 fixed typo in buildingblocks.rst 2013-04-23 12:52:55 +04:00
Solomon Hykes
cff26b3a6c Merge pull request #464 from tianon/patch-1
- Runtime: adapt cgroup capability detection to work on Gentoo
2013-04-23 00:36:10 -07:00
Solomon Hykes
329c3e0ffd Merge pull request #462 from dotcloud/initial_changelog
+ Add initial Changelog
2013-04-23 00:32:25 -07:00
Solomon Hykes
4f6cc5c733 Completed Changelog for all past versions 2013-04-23 00:30:18 -07:00
Tianon Gravi
e413340723 Update FindCgroupMountpoint to be more forgiving
On Gentoo, the memory cgroup is mounted at /sys/fs/cgroup/memory, but the mount line looks like the following:
memory on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory)

(note that the first word on the line is "memory", not "cgroup", but the other essentials are there, namely the type of cgroup and the memory mount option)
2013-04-23 01:09:29 -06:00
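The change above boils down to matching on a mount's filesystem type and option list rather than on its device name. Below is a minimal sketch of that lenient check; it reads /proc/mounts instead of the mount(8) output the repository parses (see the "Use mount to determine the cgroup mountpoint" commit further down), and the function name is illustrative rather than the project's exact code.

```go
// Lenient cgroup mountpoint detection: accept any mount whose filesystem type
// is "cgroup" and whose options list the requested subsystem, regardless of
// what the device field says (on Gentoo the device is "memory", not "cgroup").
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func findCgroupMountpoint(subsystem string) (string, error) {
	f, err := os.Open("/proc/mounts")
	if err != nil {
		return "", err
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		// Each line looks like: memory /sys/fs/cgroup/memory cgroup rw,nosuid,...,memory 0 0
		fields := strings.Fields(scanner.Text())
		if len(fields) < 4 || fields[2] != "cgroup" {
			continue
		}
		for _, opt := range strings.Split(fields[3], ",") {
			if opt == subsystem {
				return fields[1], nil
			}
		}
	}
	return "", fmt.Errorf("cgroup mountpoint not found for %s", subsystem)
}

func main() {
	mp, err := findCgroupMountpoint("memory")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("memory cgroup mounted at", mp)
}
```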
Solomon Hykes
95e066d24f - Runtime: ghost containers can be killed. 2013-04-22 22:30:33 -07:00
Solomon Hykes
82b8f7a565 hack/dockerbuilder: a standard build environment for building and uploading official binary builds of docker... inside docker 2013-04-22 22:29:12 -07:00
Solomon Hykes
97badbd29e Bumped version to 0.1.8 2013-04-22 22:04:57 -07:00
Thatcher Peskens
5a5e417d46 Merge remote-tracking branch 'dotcloud/master' into dhrp/docs
Conflicts:
	docker/docker.go
	docs/sources/installation/archlinux.rst
	docs/sources/installation/index.rst
	docs/sources/installation/ubuntulinux.rst
	runtime.go
	utils.go
2013-04-22 18:44:50 -07:00
Thatcher Peskens
4031a01af1 Merged changes 2013-04-22 18:38:42 -07:00
Guillaume J. Charmes
0b0d958b88 Merge pull request #463 from dotcloud/improve_pid_file_feature
Check that the pid in the pidfile exists before preventing docker from starting
2013-04-22 18:24:03 -07:00
Guillaume J. Charmes
03e4704ae5 Merge pull request #442 from dotcloud/fix_deleted_file_diff
Use aufs to handle parents whiteouts instead of doing it manually
2013-04-22 18:23:46 -07:00
Guillaume J. Charmes
7a8ac76299 Merge pull request #456 from dotcloud/453-generic_kernel_detection-fix
453 generic kernel detection fix
2013-04-22 18:20:17 -07:00
Guillaume J. Charmes
c05c91ca3b Make kernel detection work without suffix 2013-04-22 18:15:33 -07:00
Guillaume J. Charmes
b76d63cb0c Forbid attach to ghost 2013-04-22 17:53:32 -07:00
Guillaume J. Charmes
f926ed182f Allow to kill/stop ghosts 2013-04-22 17:53:32 -07:00
Guillaume J. Charmes
d440782e17 Allow to kill container after docker server restarts 2013-04-22 17:52:38 -07:00
Guillaume J. Charmes
82848d4158 Allow to wait on container even after docker server restarts using lxc-info 2013-04-22 17:52:38 -07:00
Guillaume J. Charmes
97535e5a64 Add unit test for file deletion 2013-04-22 17:51:09 -07:00
Guillaume J. Charmes
f079fbe3fa Check that the pid in the pidfile exists before preventing docker from starting 2013-04-22 15:57:31 -07:00
Guillaume J. Charmes
90d144b612 Merge pull request #457 from shamrin/patch-1
README.md: `docker port` instead of just `port`
2013-04-22 15:40:03 -07:00
Guillaume J. Charmes
d3db94696d Merge pull request #461 from neomantra/master
Fix typo (ghot -> ghost)
2013-04-22 15:39:34 -07:00
Evan Wies
ffe16e3224 Fix typo (ghot -> ghost) 2013-04-22 18:37:06 -04:00
Guillaume J. Charmes
0b60829df7 Add initial changelog 2013-04-22 15:26:06 -07:00
Thatcher Peskens
690e118670 Updated gettingstarted with quicker install. 2013-04-22 13:36:00 -07:00
Alexey Shamrin
038e1d174b README.md: docker port instead of just port 2013-04-23 00:27:23 +04:00
Thatcher Peskens
6c8dcd5cbb Updated Vagrantfile and documentation to reflect new installation path using Ubuntu's PPA, also switched everything to use Ubuntu 12.04 by default. 2013-04-22 13:10:32 -07:00
Guillaume J. Charmes
16aeb77d51 Move the kernel detection to arch specific files 2013-04-22 12:08:59 -07:00
Guillaume J. Charmes
4ac3b803b9 Make the kernel version detection more generic 2013-04-22 11:39:56 -07:00
Guillaume J. Charmes
3514e47edc Do not prevent docker from running when kernel detection fails 2013-04-22 11:26:34 -07:00
Guillaume J. Charmes
acb546cd1b Fix race within TestRunDisconnectTty 2013-04-22 11:16:32 -07:00
Guillaume J. Charmes
2ced94b414 Merge pull request #454 from tianon/master
Update utils.go to not enforce extra constraints on the kernel "flavor" (such as being integral or even comparable one to another)
2013-04-22 08:02:42 -07:00
Guillaume J. Charmes
71b5806614 Do not stop execution if cgroup mountpoint is not found 2013-04-22 00:44:57 -04:00
Tianon Gravi
1f65c6bf4c Update utils.go to not enforce extra constraints on the kernel "flavor" (such as being integral or even comparable one to another)
This is especially to fix the current docker on kernels such as gentoo-sources, where the "flavor" is the string "gentoo", and that obviously fails to be converted to an integer.
2013-04-21 19:19:38 -06:00
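Both kernel-detection commits point the same way: parse only the numeric part of the release string and carry everything after the first dash along as an opaque "flavor". The sketch below illustrates that idea under those assumptions; the type and function names are hypothetical, not the utils.go implementation.

```go
// Flavor-agnostic kernel version handling: only the numeric fields are parsed
// and compared; the flavor (e.g. "gentoo" or "19-generic") stays a string.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

type kernelVersion struct {
	Kernel, Major, Minor int
	Flavor               string // never parsed or compared numerically
}

// parseKernelVersion splits a release such as "3.8.0-19-generic" or
// "3.8.13-gentoo" into its numeric prefix and an opaque flavor suffix.
func parseKernelVersion(release string) (*kernelVersion, error) {
	version, flavor := release, ""
	if i := strings.Index(release, "-"); i >= 0 {
		version, flavor = release[:i], release[i+1:]
	}
	parts := strings.Split(version, ".")
	if len(parts) < 3 {
		return nil, fmt.Errorf("unexpected kernel release %q", release)
	}
	nums := make([]int, 3)
	for i := 0; i < 3; i++ {
		n, err := strconv.Atoi(parts[i])
		if err != nil {
			return nil, err
		}
		nums[i] = n
	}
	return &kernelVersion{nums[0], nums[1], nums[2], flavor}, nil
}

// compare orders two versions by their numeric fields only.
func compare(a, b *kernelVersion) int {
	av := [3]int{a.Kernel, a.Major, a.Minor}
	bv := [3]int{b.Kernel, b.Major, b.Minor}
	for i := range av {
		if av[i] != bv[i] {
			if av[i] < bv[i] {
				return -1
			}
			return 1
		}
	}
	return 0
}

func main() {
	v, err := parseKernelVersion("3.8.0-19-gentoo")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d.%d.%d flavor=%q at least 3.8.0? %v\n",
		v.Kernel, v.Major, v.Minor, v.Flavor,
		compare(v, &kernelVersion{3, 8, 0, ""}) >= 0)
}
```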
Solomon Hykes
965e8a02d2 'docker push' shows an additional progress bar while it buffers the archive to disk. Fixes #451. 2013-04-21 15:29:26 -07:00
Solomon Hykes
baacae8345 'docker push' buffers filesystem archives on disk instead of memory. 2013-04-21 14:23:55 -07:00
Solomon Hykes
52cedb8a05 Better title in ubuntu install doc 2013-04-20 18:26:15 -07:00
Solomon Hykes
15c7e72e2a Merge branch 'master' of ssh://github.com/dotcloud/docker 2013-04-20 18:19:04 -07:00
Solomon Hykes
76b40ad6c9 Merge remote-tracking branch 'origin/check_kernel_capabilities' 2013-04-20 17:40:25 -07:00
Solomon Hykes
6909f3911f Merge pull request #422 from shawnsi/arch-docs
Arch docs
2013-04-20 17:35:43 -07:00
Solomon Hykes
3b6c540fe8 Bumped version to 0.1.7 2013-04-20 17:35:00 -07:00
Solomon Hykes
d49a273071 Moved 'under the hood' below install instructions and examples in README 2013-04-20 17:33:02 -07:00
Solomon Hykes
1201c418cd Fix Ubuntu install doc 2013-04-20 17:31:49 -07:00
Solomon Hykes
4a9c3a92e1 Formatting fix in ubuntu install doc 2013-04-20 17:30:33 -07:00
Solomon Hykes
28831a412f Link to public images list in the README 2013-04-20 17:29:41 -07:00
Solomon Hykes
70cf467fdf Removed duplicate ubuntu binary install instructions from README 2013-04-20 17:29:12 -07:00
Solomon Hykes
c40f01319f Cleaned up install instructions in the README
* Added quick install on ubuntu as the 1st install option
	* Grouped other binary installs under "binary installs"
	* Removed duplicate binary ubuntu installs (linked to the docs)
	* Improved "build from source" instructions
2013-04-20 17:26:50 -07:00
Thatcher Peskens
0731d1a582 Updated ubuntu install 2013-04-19 20:59:43 -07:00
Thatcher Peskens
8ecde8f9a5 Updated documentation and fixed Vagrantfile 2013-04-19 20:57:50 -07:00
Solomon Hykes
911925b54a Add a test for allocating tcp ports and reaching them on localhost 2013-04-19 20:46:07 -07:00
Solomon Hykes
7f1a32b9ff Shutdown loopback-to-loopback proxy when unmapping a port 2013-04-19 20:44:25 -07:00
Solomon Hykes
930e9a7e43 Emulate DNAT in userland for loopback-to-loopback connections. This makes container ports available from localhost. 2013-04-19 19:35:44 -07:00
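Loopback-to-loopback connections bypass the iptables DNAT rules (the next commit explicitly excludes them), so making container ports reachable from localhost needs a small userland relay: accept on the host's loopback port, dial the container, and copy bytes both ways. A minimal sketch of that pattern follows; the addresses and ports are placeholders and this is not docker's actual proxy code.

```go
// Minimal userland loopback proxy: connections to a host port on 127.0.0.1
// (which the DNAT rules do not rewrite) are forwarded to the container's
// IP:port by copying bytes in both directions.
package main

import (
	"io"
	"log"
	"net"
)

func proxy(frontendAddr, backendAddr string) error {
	listener, err := net.Listen("tcp", frontendAddr)
	if err != nil {
		return err
	}
	for {
		client, err := listener.Accept()
		if err != nil {
			return err
		}
		go func(client net.Conn) {
			defer client.Close()
			backend, err := net.Dial("tcp", backendAddr)
			if err != nil {
				log.Printf("proxy: %v", err)
				return
			}
			defer backend.Close()
			go io.Copy(backend, client) // client -> container
			io.Copy(client, backend)    // container -> client
		}(client)
	}
}

func main() {
	// Placeholder example: host port 49153 on loopback forwarded to a
	// hypothetical container address 172.17.0.2:6379.
	log.Fatal(proxy("127.0.0.1:49153", "172.17.0.2:6379"))
}
```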
Solomon Hykes
61259ab4b4 Exclude loopback-to-loopback connections from DNAT rules, to allow userland proxying 2013-04-19 19:32:32 -07:00
Solomon Hykes
931ca464a7 'docker ps' shows port mappings 2013-04-19 19:29:13 -07:00
Guillaume J. Charmes
cc5a044a8c update TestRunDisconnectTty to reflect the correct behavior of CmdRun 2013-04-19 17:51:41 -07:00
Guillaume J. Charmes
0eb425426f Merge pull request #444 from lopter/master
Fixes the race condition between docker run and docker logs from #428
2013-04-19 18:09:58 -07:00
Guillaume J. Charmes
3bfb70db24 Wait for the container terminate at the end of CmdRun
Fixes the race condition between docker run and docker logs from #428.
2013-04-19 18:06:13 -07:00
Guillaume J. Charmes
e49af5b6de Use aufs to handle parents whiteouts instead of doing it manually 2013-04-19 16:33:25 -07:00
Solomon Hykes
d8416539b3 contrib/vagrant-docker: a placeholder to centralize collaboration on an official docker provider for vagrant 2013-04-19 15:55:34 -07:00
Solomon Hykes
a76c3a9c95 Merge branch 'master' of ssh://github.com/dotcloud/docker 2013-04-19 12:55:32 -07:00
Solomon Hykes
e81ddb2dc7 Fixed 'hack' rule in Makefile 2013-04-19 12:55:17 -07:00
Solomon Hykes
4d728821e3 Merge remote-tracking branch 'origin/buildbot' 2013-04-19 12:47:22 -07:00
Guillaume J. Charmes
e92c4b1f39 Merge pull request #415 from dotcloud/261-choose_grace_period-feature
added -t in docker stop and restart to choose grace period
2013-04-19 12:42:34 -07:00
Solomon Hykes
152a9f77b4 Fix ubuntu packaging to build from a clean checkout of the correct git tag 2013-04-19 12:39:40 -07:00
Solomon Hykes
bfb84b564c Merge remote-tracking branch 'origin/30-packaging-ubuntu' 2013-04-19 11:31:53 -07:00
Solomon Hykes
e8a67f632e Cleanup examples on README 2013-04-18 22:37:45 -07:00
Solomon Hykes
3d2fd8a650 Small wording fix in README 2013-04-18 22:24:52 -07:00
Solomon Hykes
79a78d37e7 Add examples to the README 2013-04-18 22:24:29 -07:00
Solomon Hykes
3ae5c45d9a Fix examples in README to no longer rely on standalone mode 2013-04-18 22:22:00 -07:00
Guillaume J. Charmes
f3e89fae28 Use mount to determine the cgroup mountpoint 2013-04-18 21:57:58 -07:00
Guillaume J. Charmes
c42a4179fc Add unit tests for CompareKernelVersion 2013-04-18 21:34:34 -07:00
Guillaume J. Charmes
2d32ac8cff Improve the docker version output 2013-04-18 21:08:33 -07:00
Guillaume J. Charmes
f68d107a13 Remove the NO_MEMORY_LIMIT constant 2013-04-18 21:08:20 -07:00
Guillaume J. Charmes
640efc2ed2 Add capabilities check to allow docker to run on kernel that does not have all options 2013-04-18 20:55:41 -07:00
Guillaume J. Charmes
003622c8b6 Check kernel version and display warning if too low 2013-04-18 20:47:24 -07:00
Thatcher Peskens
6de5ca1e64 Added redirect from old location of documentation (/documentation), this was the location when we were on github. 2013-04-18 16:00:18 -07:00
Solomon Hykes
deb55e416e contrib/docker-build: don't remove the base image if the first build step fails 2013-04-18 14:46:17 -07:00
Shawn Siefkas
7eda9c64b8 Updating the arch linux installation docs
New AUR package name
Adding systemd service unit info
2013-04-18 09:20:23 -05:00
Shawn Siefkas
84c13a3dcf Adding archlinux packaging documentation 2013-04-18 09:17:31 -05:00
Victor Vieux
90602ab62a fixed test 2013-04-18 16:03:50 +02:00
Daniel Mizyrycki
8e6ba343bf packaging-ubuntu, issue #30: Ensure docker package installs and passes tests on official vagrant Ubuntu 12.04 box 2013-04-17 21:10:53 -07:00
Daniel Mizyrycki
523cd8e29c packaging-ubuntu, issue #30: streamline building and uploading to PPA 2013-04-17 20:56:34 -07:00
Daniel Mizyrycki
fd39af7f85 packaging-ubuntu: move original files in place for update 2013-04-17 20:56:34 -07:00
Solomon Hykes
ee82870ff7 Bumped version to 0.1.6 to mark image format change (author field) 2013-04-17 20:18:35 -07:00
Solomon Hykes
227a8142a3 Record the author of an image with 'docker commit -author' 2013-04-17 20:13:11 -07:00
Solomon Hykes
4ef2d5c1e6 Added 'author' field to the image format 2013-04-17 19:58:17 -07:00
Victor Vieux
1615bb08c7 added -t in docker stop and restart to choose grace period 2013-04-17 11:56:30 +02:00
Daniel Mizyrycki
d2c1850fb5 testing: Make postcommit more generic 2013-04-10 11:32:48 -07:00
Solomon Hykes
cb68662b9b Merge pull request #377 from jbarbier/buildbot
Fix the Makefile, rule=hack to make it work on Windows
2013-04-09 20:53:29 -07:00
Julien Barbier
b7cda3288e Fix the Makefile, rule=hack to make it work on Windows 2013-04-09 19:07:50 -07:00
Daniel Mizyrycki
bbaa975ec8 testing: Add buildbot VM 2013-04-09 14:37:37 -07:00
68 changed files with 1966 additions and 700 deletions

86
CHANGELOG.md Normal file
View File

@@ -0,0 +1,86 @@
# Changelog
## 0.2.0 (2013-04-23)
- Runtime: ghost containers can be killed and waited for
* Documentation: update install instructions
- Packaging: fix Vagrantfile
- Development: automate releasing binaries and ubuntu packages
+ Add a changelog
- Various bugfixes
## 0.1.8 (2013-04-22)
- Dynamically detect cgroup capabilities
- Issue stability warning on kernels <3.8
- 'docker push' buffers on disk instead of memory
- Fix 'docker diff' for removed files
- Fix 'docker stop' for ghost containers
- Fix handling of pidfile
- Various bugfixes and stability improvements
## 0.1.7 (2013-04-18)
- Container ports are available on localhost
- 'docker ps' shows allocated TCP ports
- Contributors can run 'make hack' to start a continuous integration VM
- Streamline ubuntu packaging & uploading
- Various bugfixes and stability improvements
## 0.1.6 (2013-04-17)
- Record the author of an image with 'docker commit -author'
## 0.1.5 (2013-04-17)
- Disable standalone mode
- Use a custom DNS resolver with 'docker -d -dns'
- Detect ghost containers
- Improve diagnosis of missing system capabilities
- Allow disabling memory limits at compile time
- Add debian packaging
- Documentation: installing on Arch Linux
- Documentation: running Redis on docker
- Fixed lxc 0.9 compatibility
- Automatically load aufs module
- Various bugfixes and stability improvements
## 0.1.4 (2013-04-09)
- Full support for TTY emulation
- Detach from a TTY session with the escape sequence `C-p C-q`
- Various bugfixes and stability improvements
- Minor UI improvements
- Automatically create our own bridge interface 'docker0'
## 0.1.3 (2013-04-04)
- Choose TCP frontend port with '-p :PORT'
- Layer format is versioned
- Major reliability improvements to the process manager
- Various bugfixes and stability improvements
## 0.1.2 (2013-04-03)
- Set container hostname with 'docker run -h'
- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]'
- Various bugfixes and stability improvements
- UI polish
- Progress bar on push/pull
- Use XZ compression by default
- Make IP allocator lazy
## 0.1.1 (2013-03-31)
- Display shorthand IDs for convenience
- Stabilize process management
- Layers can include a commit message
- Simplified 'docker attach'
- Fixed support for re-attaching
- Various bugfixes and stability improvements
- Auto-download at run
- Auto-login on push
- Beefed up documentation
## 0.1.0 (2013-03-23)
- First release
- Implement registry in order to push/pull images
- TCP port allocation
- Fix termcaps on Linux
- Add documentation
- Add Vagrant support with Vagrantfile
- Add unit tests
- Add repository/tags to ease image management
- Improve the layer implementation

View File

@@ -1,5 +1,9 @@
DOCKER_PACKAGE := github.com/dotcloud/docker
RELEASE_VERSION := $(shell git tag | grep -E "v[0-9\.]+$$" | sort -nr | head -n 1)
SRCRELEASE := docker-$(RELEASE_VERSION)
BINRELEASE := docker-$(RELEASE_VERSION).tgz
GIT_ROOT := $(shell git rev-parse --show-toplevel)
BUILD_DIR := $(CURDIR)/.gopath
GOPATH ?= $(BUILD_DIR)
@@ -13,10 +17,7 @@ endif
GIT_COMMIT = $(shell git rev-parse --short HEAD)
GIT_STATUS = $(shell test -n "`git status --porcelain`" && echo "+CHANGES")
NO_MEMORY_LIMIT ?= 0
export NO_MEMORY_LIMIT
BUILD_OPTIONS = -ldflags "-X main.GIT_COMMIT $(GIT_COMMIT)$(GIT_STATUS) -X main.NO_MEMORY_LIMIT $(NO_MEMORY_LIMIT)"
BUILD_OPTIONS = -ldflags "-X main.GIT_COMMIT $(GIT_COMMIT)$(GIT_STATUS)"
SRC_DIR := $(GOPATH)/src
@@ -26,18 +27,38 @@ DOCKER_MAIN := $(DOCKER_DIR)/docker
DOCKER_BIN_RELATIVE := bin/docker
DOCKER_BIN := $(CURDIR)/$(DOCKER_BIN_RELATIVE)
.PHONY: all clean test
.PHONY: all clean test hack release srcrelease $(BINRELEASE) $(SRCRELEASE) $(DOCKER_BIN) $(DOCKER_DIR)
all: $(DOCKER_BIN)
$(DOCKER_BIN): $(DOCKER_DIR)
@mkdir -p $(dir $@)
@(cd $(DOCKER_MAIN); go get $(GO_OPTIONS); go build $(GO_OPTIONS) $(BUILD_OPTIONS) -o $@)
@(cd $(DOCKER_MAIN); go build $(GO_OPTIONS) $(BUILD_OPTIONS) -o $@)
@echo $(DOCKER_BIN_RELATIVE) is created.
$(DOCKER_DIR):
@mkdir -p $(dir $@)
@rm -f $@
@ln -sf $(CURDIR)/ $@
@(cd $(DOCKER_MAIN); go get $(GO_OPTIONS))
whichrelease:
echo $(RELEASE_VERSION)
release: $(BINRELEASE)
srcrelease: $(SRCRELEASE)
deps: $(DOCKER_DIR)
# A clean checkout of $RELEASE_VERSION, with vendored dependencies
$(SRCRELEASE):
rm -fr $(SRCRELEASE)
git clone $(GIT_ROOT) $(SRCRELEASE)
cd $(SRCRELEASE); git checkout -q $(RELEASE_VERSION)
# A binary release ready to be uploaded to a mirror
$(BINRELEASE): $(SRCRELEASE)
rm -f $(BINRELEASE)
cd $(SRCRELEASE); make; cp -R bin docker-$(RELEASE_VERSION); tar -f ../$(BINRELEASE) -zv -c docker-$(RELEASE_VERSION)
clean:
@rm -rf $(dir $(DOCKER_BIN))
@@ -52,3 +73,6 @@ test: all
fmt:
@gofmt -s -l -w .
hack:
cd $(CURDIR)/buildbot && vagrant up

181
README.md
View File

@@ -33,123 +33,85 @@ Notable features
* Interactive shell: docker can allocate a pseudo-tty and attach to the standard input of any container, for example to run a throwaway interactive shell.
Under the hood
--------------
Under the hood, Docker is built on the following components:
* The [cgroup](http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c) and [namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part) capabilities of the Linux kernel;
* [AUFS](http://aufs.sourceforge.net/aufs.html), a powerful union filesystem with copy-on-write capabilities;
* The [Go](http://golang.org) programming language;
* [lxc](http://lxc.sourceforge.net/), a set of convenience scripts to simplify the creation of linux containers.
Install instructions
==================
Building from source
--------------------
1. Make sure you have a [Go language](http://golang.org) compiler.
On a Debian/wheezy or Ubuntu 12.10 install the package:
```bash
$ sudo apt-get install golang-go
```
2. Execute ``make``
This command will install all necessary dependencies and build the
executable that you can find in ``bin/docker``
3. Should you like to see what's happening, run ``make`` with ``VERBOSE=1`` parameter:
```bash
$ make VERBOSE=1
```
Installing on Ubuntu 12.04 and 12.10
------------------------------------
1. Install dependencies:
```bash
sudo apt-get install lxc bsdtar
sudo apt-get install linux-image-extra-`uname -r`
```
The `linux-image-extra` package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
2. Install the latest docker binary:
```bash
wget http://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-master.tgz
tar -xf docker-master.tgz
```
3. Run your first container!
```bash
cd docker-master
sudo ./docker pull base
sudo ./docker run -i -t base /bin/bash
```
Consider adding docker to your `PATH` for simplicity.
Installing on other Linux distributions
Quick install on Ubuntu 12.04 and 12.10
---------------------------------------
Right now, the officially supported distributions are:
```bash
curl get.docker.io | sh -x
```
* Ubuntu 12.04 (precise LTS)
* Ubuntu 12.10 (quantal)
Binary installs
----------------
Docker probably works on other distributions featuring a recent kernel, the AUFS patch, and up-to-date lxc. However this has not been tested.
Docker supports the following binary installation methods.
Note that some methods are community contributions and not yet officially supported.
Some streamlined (but possibly outdated) installation paths are available from the website: http://docker.io/documentation/
* [Ubuntu 12.04 and 12.10 (officially supported)](http://docs.docker.io/en/latest/installation/ubuntulinux/)
* [Arch Linux](http://docs.docker.io/en/latest/installation/archlinux/)
* [MacOS X (with Vagrant)](http://docs.docker.io/en/latest/installation/macos/)
* [Windows (with Vagrant)](http://docs.docker.io/en/latest/installation/windows/)
* [Amazon EC2 (with Vagrant)](http://docs.docker.io/en/latest/installation/amazon/)
Installing from source
----------------------
1. Make sure you have a [Go language](http://golang.org/doc/install) compiler and [git](http://git-scm.com) installed.
2. Checkout the source code
```bash
git clone http://github.com/dotcloud/docker
```
3. Build the docker binary
```bash
cd docker
make VERBOSE=1
sudo cp ./bin/docker /usr/local/bin/docker
```
Usage examples
==============
Running an interactive shell
----------------------------
First run the docker daemon
---------------------------
All the examples assume your machine is running the docker daemon. To run the docker daemon in the background, simply type:
```bash
# Download a base image
docker pull base
# Run an interactive shell in the base image,
# allocate a tty, attach stdin and stdout
docker run -i -t base /bin/bash
# On a production system you want this running in an init script
sudo docker -d &
```
Detaching from the interactive shell
------------------------------------
Now you can run docker in client mode: all commands will be forwarded to the docker daemon, so the client can run from any account.
```bash
# Now you can run docker commands from any account.
docker help
```
# In order to detach without killing the shell, you can use the escape sequence Ctrl-p + Ctrl-q
# Note: this works only in tty mode (run with -t option).
Throwaway shell in a base ubuntu image
--------------------------------------
```bash
docker pull ubuntu:12.10
# Run an interactive shell, allocate a tty, attach stdin and stdout
# To detach the tty without exiting the shell, use the escape sequence Ctrl-p + Ctrl-q
docker run -i -t ubuntu:12.10 /bin/bash
```
Starting a long-running worker process
--------------------------------------
```bash
# Run docker in daemon mode
(docker -d || echo "Docker daemon already running") &
# Start a very useful long-running process
JOB=$(docker run -d base /bin/sh -c "while true; do echo Hello world; sleep 1; done")
JOB=$(docker run -d ubuntu /bin/sh -c "while true; do echo Hello world; sleep 1; done")
# Collect the output of the job so far
docker logs $JOB
@@ -158,25 +120,32 @@ docker logs $JOB
docker kill $JOB
```
Listing all running containers
------------------------------
Running an irc bouncer
----------------------
```bash
docker ps
BOUNCER_ID=$(docker run -d -p 6667 -u irc shykes/znc $USER $PASSWORD)
echo "Configure your irc client to connect to port $(docker port $BOUNCER_ID 6667) of this machine"
```
Running Redis
-------------
```bash
REDIS_ID=$(docker run -d -p 6379 shykes/redis redis-server)
echo "Configure your redis client to connect to port $(docker port $REDIS_ID 6379) of this machine"
```
Share your own image!
---------------------
```bash
docker pull base
CONTAINER=$(docker run -d base apt-get install -y curl)
CONTAINER=$(docker run -d ubuntu:12.10 apt-get install -y curl)
docker commit -m "Installed curl" $CONTAINER $USER/betterbase
docker push $USER/betterbase
```
A list of publicly available images is [available here](https://github.com/dotcloud/docker/wiki/Public-docker-images).
Expose a service on a TCP port
------------------------------
@@ -197,6 +166,22 @@ echo hello world | nc $IP $PORT
echo "Daemon received: $(docker logs $JOB)"
```
Under the hood
--------------
Under the hood, Docker is built on the following components:
* The [cgroup](http://blog.dotcloud.com/kernel-secrets-from-the-paas-garage-part-24-c) and [namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part) capabilities of the Linux kernel;
* [AUFS](http://aufs.sourceforge.net/aufs.html), a powerful union filesystem with copy-on-write capabilities;
* The [Go](http://golang.org) programming language;
* [lxc](http://lxc.sourceforge.net/), a set of convenience scripts to simplify the creation of linux containers.
Contributing to Docker
======================

57
Vagrantfile vendored
View File

@@ -2,19 +2,12 @@
# vi: set ft=ruby :
def v10(config)
config.vm.box = "quantal64_3.5.0-25"
config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box"
config.vm.box = 'precise64'
config.vm.box_url = 'http://files.vagrantup.com/precise64.box'
config.vm.share_folder "v-data", "/opt/go/src/github.com/dotcloud/docker", File.dirname(__FILE__)
# Ensure puppet is installed on the instance
config.vm.provision :shell, :inline => "apt-get -qq update; apt-get install -y puppet"
config.vm.provision :puppet do |puppet|
puppet.manifests_path = "puppet/manifests"
puppet.manifest_file = "quantal64.pp"
puppet.module_path = "puppet/modules"
end
# Install ubuntu packaging dependencies and create ubuntu packages
config.vm.provision :shell, :inline => "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >>/etc/apt/sources.list"
config.vm.provision :shell, :inline => 'export DEBIAN_FRONTEND=noninteractive; apt-get -qq update; apt-get install -qq -y --force-yes lxc-docker'
end
Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config|
@@ -30,11 +23,11 @@ Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
aws.ssh_private_key_path = ENV["AWS_SSH_PRIVKEY"]
aws.region = "us-east-1"
aws.ami = "ami-ae9806c7"
aws.ami = "ami-d0f89fb9"
aws.ssh_username = "ubuntu"
aws.instance_type = "t1.micro"
end
@@ -51,7 +44,39 @@ Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
end
config.vm.provider :virtualbox do |vb|
config.vm.box = "quantal64_3.5.0-25"
config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box"
config.vm.box = 'precise64'
config.vm.box_url = 'http://files.vagrantup.com/precise64.box'
end
end
Vagrant::VERSION >= "1.2.0" and Vagrant.configure("2") do |config|
config.vm.provider :aws do |aws, override|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
override.ssh.private_key_path = ENV["AWS_SSH_PRIVKEY"]
override.ssh.username = "ubuntu"
aws.region = "us-east-1"
aws.ami = "ami-d0f89fb9"
aws.instance_type = "t1.micro"
end
config.vm.provider :rackspace do |rs|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-rackspace/raw/master/dummy.box"
config.ssh.private_key_path = ENV["RS_PRIVATE_KEY"]
rs.username = ENV["RS_USERNAME"]
rs.api_key = ENV["RS_API_KEY"]
rs.public_key_path = ENV["RS_PUBLIC_KEY"]
rs.flavor = /512MB/
rs.image = /Ubuntu/
end
config.vm.provider :virtualbox do |vb|
config.vm.box = 'precise64'
config.vm.box_url = 'http://files.vagrantup.com/precise64.box'
end
end

View File

@@ -4,6 +4,7 @@ import (
"errors"
"io"
"io/ioutil"
"os"
"os/exec"
)
@@ -86,3 +87,38 @@ func CmdStream(cmd *exec.Cmd) (io.Reader, error) {
}
return pipeR, nil
}
// NewTempArchive reads the content of src into a temporary file, and returns the contents
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
f, err := ioutil.TempFile(dir, "")
if err != nil {
return nil, err
}
if _, err := io.Copy(f, src); err != nil {
return nil, err
}
if _, err := f.Seek(0, 0); err != nil {
return nil, err
}
st, err := f.Stat()
if err != nil {
return nil, err
}
size := st.Size()
return &TempArchive{f, size}, nil
}
type TempArchive struct {
*os.File
Size int64 // Pre-computed from Stat().Size() as a convenience
}
func (archive *TempArchive) Read(data []byte) (int, error) {
n, err := archive.File.Read(data)
if err != nil {
os.Remove(archive.File.Name())
}
return n, err
}

20
buildbot/README.rst Normal file
View File

@@ -0,0 +1,20 @@
Buildbot
========
Buildbot is a continuous integration system designed to automate the
build/test cycle. By automatically rebuilding and testing the tree each time
something has changed, build problems are pinpointed quickly, before other
developers are inconvenienced by the failure.
Running 'make hack' at the docker root directory spawns a virtual machine
in the background running a buildbot instance, and adds a git post-commit
hook that automatically runs docker tests for you.
You can check your buildbot instance at http://192.168.33.21:8010/waterfall
Buildbot dependencies
---------------------
vagrant, virtualbox packages and python package requests

28
buildbot/Vagrantfile vendored Normal file
View File

@@ -0,0 +1,28 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
$BUILDBOT_IP = '192.168.33.21'
def v10(config)
config.vm.box = "quantal64_3.5.0-25"
config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box"
config.vm.share_folder 'v-data', '/data/docker', File.dirname(__FILE__) + '/..'
config.vm.network :hostonly, $BUILDBOT_IP
# Ensure puppet is installed on the instance
config.vm.provision :shell, :inline => 'apt-get -qq update; apt-get install -y puppet'
config.vm.provision :puppet do |puppet|
puppet.manifests_path = '.'
puppet.manifest_file = 'buildbot.pp'
puppet.options = ['--templatedir','.']
end
end
Vagrant::VERSION < '1.1.0' and Vagrant::Config.run do |config|
v10(config)
end
Vagrant::VERSION >= '1.1.0' and Vagrant.configure('1') do |config|
v10(config)
end

View File

@@ -0,0 +1,43 @@
#!/bin/bash
# Auto setup of buildbot configuration. Package installation is being done
# on buildbot.pp
# Dependencies: buildbot, buildbot-slave, supervisor
SLAVE_NAME='buildworker'
SLAVE_SOCKET='localhost:9989'
BUILDBOT_PWD='pass-docker'
USER='vagrant'
ROOT_PATH='/data/buildbot'
DOCKER_PATH='/data/docker'
BUILDBOT_CFG="$DOCKER_PATH/buildbot/buildbot-cfg"
IP=$(grep BUILDBOT_IP /data/docker/buildbot/Vagrantfile | awk -F "'" '{ print $2; }')
function run { su $USER -c "$1"; }
export PATH=/bin:sbin:/usr/bin:/usr/sbin:/usr/local/bin
# Exit if buildbot has already been installed
[ -d "$ROOT_PATH" ] && exit 0
# Setup buildbot
run "mkdir -p ${ROOT_PATH}"
cd ${ROOT_PATH}
run "buildbot create-master master"
run "cp $BUILDBOT_CFG/master.cfg master"
run "sed -i 's/localhost/$IP/' master/master.cfg"
run "buildslave create-slave slave $SLAVE_SOCKET $SLAVE_NAME $BUILDBOT_PWD"
# Allow buildbot subprocesses (docker tests) to properly run in containers,
# in particular with docker -u
run "sed -i 's/^umask = None/umask = 000/' ${ROOT_PATH}/slave/buildbot.tac"
# Setup supervisor
cp $BUILDBOT_CFG/buildbot.conf /etc/supervisor/conf.d/buildbot.conf
sed -i "s/^chmod=0700.*0700./chmod=0770\nchown=root:$USER/" /etc/supervisor/supervisord.conf
kill -HUP `pgrep -f "/usr/bin/python /usr/bin/supervisord"`
# Add git hook
cp $BUILDBOT_CFG/post-commit $DOCKER_PATH/.git/hooks
sed -i "s/localhost/$IP/" $DOCKER_PATH/.git/hooks/post-commit

View File

@@ -0,0 +1,18 @@
[program:buildmaster]
command=su vagrant -c "buildbot start master"
directory=/data/buildbot
chown= root:root
redirect_stderr=true
stdout_logfile=/var/log/supervisor/buildbot-master.log
stderr_logfile=/var/log/supervisor/buildbot-master.log
[program:buildworker]
command=buildslave start slave
directory=/data/buildbot
chown= root:root
redirect_stderr=true
stdout_logfile=/var/log/supervisor/buildbot-slave.log
stderr_logfile=/var/log/supervisor/buildbot-slave.log
[group:buildbot]
programs=buildmaster,buildworker

View File

@@ -0,0 +1,46 @@
import os
from buildbot.buildslave import BuildSlave
from buildbot.schedulers.forcesched import ForceScheduler
from buildbot.config import BuilderConfig
from buildbot.process.factory import BuildFactory
from buildbot.steps.shell import ShellCommand
from buildbot.status import html
from buildbot.status.web import authz, auth
PORT_WEB = 8010 # Buildbot webserver port
PORT_MASTER = 9989 # Port where the buildbot master listens for buildworkers
TEST_USER = 'buildbot' # Credential to authenticate build triggers
TEST_PWD = 'docker' # Credential to authenticate build triggers
BUILDER_NAME = 'docker'
BUILDPASSWORD = 'pass-docker' # Credential to authenticate buildworkers
DOCKER_PATH = '/data/docker'
c = BuildmasterConfig = {}
c['title'] = "Docker"
c['titleURL'] = "waterfall"
c['buildbotURL'] = "http://localhost:{0}/".format(PORT_WEB)
c['db'] = {'db_url':"sqlite:///state.sqlite"}
c['slaves'] = [BuildSlave('buildworker', BUILDPASSWORD)]
c['slavePortnum'] = PORT_MASTER
c['schedulers'] = [ForceScheduler(name='trigger',builderNames=[BUILDER_NAME])]
# Docker test command
test_cmd = """(
cd {0}/..; rm -rf docker-tmp; git clone docker docker-tmp;
cd docker-tmp; make test; exit_status=$?;
cd ..; rm -rf docker-tmp; exit $exit_status)""".format(DOCKER_PATH)
# Builder
factory = BuildFactory()
factory.addStep(ShellCommand(description='Docker',logEnviron=False,
usePTY=True,command=test_cmd))
c['builders'] = [BuilderConfig(name=BUILDER_NAME,slavenames=['buildworker'],
factory=factory)]
# Status
authz_cfg=authz.Authz(auth=auth.BasicAuth([(TEST_USER,TEST_PWD)]),
forceBuild='auth')
c['status'] = [html.WebStatus(http_port=PORT_WEB, authz=authz_cfg)]

View File

@@ -0,0 +1,21 @@
#!/usr/bin/env python
'''Trigger buildbot docker test build
post-commit git hook designed to automatically trigger buildbot on
the provided vagrant docker VM.'''
import requests
USERNAME = 'buildbot'
PASSWORD = 'docker'
BASE_URL = 'http://localhost:8010'
path = lambda s: BASE_URL + '/' + s
try:
session = requests.session()
session.post(path('login'),data={'username':USERNAME,'passwd':PASSWORD})
session.post(path('builders/docker/force'),
data={'forcescheduler':'trigger','reason':'Test commit'})
except:
pass

32
buildbot/buildbot.pp Normal file
View File

@@ -0,0 +1,32 @@
node default {
$USER = 'vagrant'
$ROOT_PATH = '/data/buildbot'
$DOCKER_PATH = '/data/docker'
exec {'apt_update': command => '/usr/bin/apt-get update' }
Package { require => Exec['apt_update'] }
group {'puppet': ensure => 'present'}
# Install dependencies
Package { ensure => 'installed' }
package { ['python-dev','python-pip','supervisor','lxc','bsdtar','git','golang']: }
file{[ '/data' ]:
owner => $USER, group => $USER, ensure => 'directory' }
file {'/var/tmp/requirements.txt':
content => template('requirements.txt') }
exec {'requirements':
require => [ Package['python-dev'], Package['python-pip'],
File['/var/tmp/requirements.txt'] ],
cwd => '/var/tmp',
command => "/bin/sh -c '(/usr/bin/pip install -r requirements.txt;
rm /var/tmp/requirements.txt)'" }
exec {'buildbot-cfg-sh':
require => [ Package['supervisor'], Exec['requirements']],
path => '/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin',
cwd => '/data',
command => "$DOCKER_PATH/buildbot/buildbot-cfg/buildbot-cfg.sh" }
}

View File

@@ -0,0 +1,6 @@
sqlalchemy<=0.7.9
sqlalchemy-migrate>=0.7.2
buildbot==0.8.7p1
buildbot_slave==0.8.7p1
nose==1.2.1
requests==1.1.0

View File

@@ -18,11 +18,10 @@ import (
"unicode"
)
const VERSION = "0.1.5"
const VERSION = "0.2.0"
var (
GIT_COMMIT string
NO_MEMORY_LIMIT bool
GIT_COMMIT string
)
func (srv *Server) Name() string {
@@ -184,10 +183,14 @@ func (srv *Server) CmdWait(stdin io.ReadCloser, stdout io.Writer, args ...string
// 'docker version': show version information
func (srv *Server) CmdVersion(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
fmt.Fprintf(stdout, "Version:%s\n", VERSION)
fmt.Fprintf(stdout, "Git Commit:%s\n", GIT_COMMIT)
if NO_MEMORY_LIMIT {
fmt.Fprintf(stdout, "Memory limit disabled\n")
fmt.Fprintf(stdout, "Version: %s\n", VERSION)
fmt.Fprintf(stdout, "Git Commit: %s\n", GIT_COMMIT)
fmt.Fprintf(stdout, "Kernel: %s\n", srv.runtime.kernelVersion)
if !srv.runtime.capabilities.MemoryLimit {
fmt.Fprintf(stdout, "WARNING: No memory limit support\n")
}
if !srv.runtime.capabilities.SwapLimit {
fmt.Fprintf(stdout, "WARNING: No swap limit support\n")
}
return nil
}
@@ -223,7 +226,8 @@ func (srv *Server) CmdInfo(stdin io.ReadCloser, stdout io.Writer, args ...string
}
func (srv *Server) CmdStop(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
cmd := rcli.Subcmd(stdout, "stop", "CONTAINER [CONTAINER...]", "Stop a running container")
cmd := rcli.Subcmd(stdout, "stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container")
nSeconds := cmd.Int("t", 10, "wait t seconds before killing the container")
if err := cmd.Parse(args); err != nil {
return nil
}
@@ -233,7 +237,7 @@ func (srv *Server) CmdStop(stdin io.ReadCloser, stdout io.Writer, args ...string
}
for _, name := range cmd.Args() {
if container := srv.runtime.Get(name); container != nil {
if err := container.Stop(); err != nil {
if err := container.Stop(*nSeconds); err != nil {
return err
}
fmt.Fprintln(stdout, container.ShortId())
@@ -246,6 +250,7 @@ func (srv *Server) CmdStop(stdin io.ReadCloser, stdout io.Writer, args ...string
func (srv *Server) CmdRestart(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
cmd := rcli.Subcmd(stdout, "restart", "CONTAINER [CONTAINER...]", "Restart a running container")
nSeconds := cmd.Int("t", 10, "wait t seconds before killing the container")
if err := cmd.Parse(args); err != nil {
return nil
}
@@ -255,7 +260,7 @@ func (srv *Server) CmdRestart(stdin io.ReadCloser, stdout io.Writer, args ...str
}
for _, name := range cmd.Args() {
if container := srv.runtime.Get(name); container != nil {
if err := container.Restart(); err != nil {
if err := container.Restart(*nSeconds); err != nil {
return err
}
fmt.Fprintln(stdout, container.ShortId())
@@ -470,9 +475,9 @@ func (srv *Server) CmdImport(stdin io.ReadCloser, stdout rcli.DockerConn, args .
if err != nil {
return err
}
archive = ProgressReader(resp.Body, int(resp.ContentLength), stdout)
archive = ProgressReader(resp.Body, int(resp.ContentLength), stdout, "Importing %v/%v (%v)")
}
img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src)
img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "")
if err != nil {
return err
}
@@ -675,7 +680,7 @@ func (srv *Server) CmdPs(stdin io.ReadCloser, stdout io.Writer, args ...string)
}
w := tabwriter.NewWriter(stdout, 12, 1, 3, ' ', 0)
if !*quiet {
fmt.Fprintln(w, "ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tCOMMENT")
fmt.Fprintln(w, "ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tCOMMENT\tPORTS")
}
for i, container := range srv.runtime.List() {
if !container.State.Running && !*flAll && *nLast == -1 {
@@ -696,6 +701,7 @@ func (srv *Server) CmdPs(stdin io.ReadCloser, stdout io.Writer, args ...string)
/* CREATED */ HumanDuration(time.Now().Sub(container.Created)) + " ago",
/* STATUS */ container.State.String(),
/* COMMENT */ "",
/* PORTS */ container.NetworkSettings.PortMappingHuman(),
} {
if idx == 0 {
w.Write([]byte(field))
@@ -719,6 +725,7 @@ func (srv *Server) CmdCommit(stdin io.ReadCloser, stdout io.Writer, args ...stri
"commit", "[OPTIONS] CONTAINER [REPOSITORY [TAG]]",
"Create a new image from a container's changes")
flComment := cmd.String("m", "", "Commit message")
flAuthor := cmd.String("author", "", "Author (eg. \"John Hannibal Smith <hannibal@a-team.com>\"")
if err := cmd.Parse(args); err != nil {
return nil
}
@@ -727,7 +734,7 @@ func (srv *Server) CmdCommit(stdin io.ReadCloser, stdout io.Writer, args ...stri
cmd.Usage()
return nil
}
img, err := srv.runtime.Commit(containerName, repository, tag, *flComment)
img, err := srv.runtime.Commit(containerName, repository, tag, *flComment, *flAuthor)
if err != nil {
return err
}
@@ -829,6 +836,10 @@ func (srv *Server) CmdAttach(stdin io.ReadCloser, stdout rcli.DockerConn, args .
return fmt.Errorf("No such container: %s", name)
}
if container.State.Ghost {
return fmt.Errorf("Impossible to attach to a ghost container")
}
if container.Config.Tty {
stdout.SetOptionRawTerminal()
}
@@ -906,7 +917,7 @@ func (srv *Server) CmdTag(stdin io.ReadCloser, stdout io.Writer, args ...string)
}
func (srv *Server) CmdRun(stdin io.ReadCloser, stdout rcli.DockerConn, args ...string) error {
config, err := ParseRun(args, stdout)
config, err := ParseRun(args, stdout, srv.runtime.capabilities)
if err != nil {
return err
}
@@ -973,6 +984,12 @@ func (srv *Server) CmdRun(stdin io.ReadCloser, stdout rcli.DockerConn, args ...s
Debugf("Waiting for attach to return\n")
<-attachErr
// Expecting I/O pipe error, discarding
// If we are in stdinonce mode, wait for the process to end
// otherwise, simply return
if config.StdinOnce && !config.Tty {
container.Wait()
}
return nil
}

View File

@@ -228,6 +228,21 @@ func TestRunDisconnectTty(t *testing.T) {
close(c1)
}()
setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
for {
// Client disconnect after run -i should keep stdin out in TTY mode
l := runtime.List()
if len(l) == 1 && l[0].State.Running {
break
}
time.Sleep(10 * time.Millisecond)
}
})
// Client disconnect after run -i should keep stdin out in TTY mode
container := runtime.List()[0]
setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
t.Fatal(err)
@@ -239,14 +254,9 @@ func TestRunDisconnectTty(t *testing.T) {
t.Fatal(err)
}
// as the pipes are close, we expect the process to die,
// therefore CmdRun to unblock. Wait for CmdRun
setTimeout(t, "Waiting for CmdRun timed out", 2*time.Second, func() {
<-c1
})
// In tty mode, we expect the process to stay alive even after client's stdin closes.
// Do not wait for run to finish
// Client disconnect after run -i should keep stdin out in TTY mode
container := runtime.List()[0]
// Give some time to monitor to do his thing
container.WaitTimeout(500 * time.Millisecond)
if !container.State.Running {

View File

@@ -11,7 +11,9 @@ import (
"os"
"os/exec"
"path"
"sort"
"strconv"
"strings"
"syscall"
"time"
)
@@ -66,7 +68,7 @@ type Config struct {
Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
}
func ParseRun(args []string, stdout io.Writer) (*Config, error) {
func ParseRun(args []string, stdout io.Writer, capabilities *Capabilities) (*Config, error) {
cmd := rcli.Subcmd(stdout, "run", "[OPTIONS] IMAGE COMMAND [ARG...]", "Run a command in a new container")
if len(args) > 0 && args[0] != "--help" {
cmd.SetOutput(ioutil.Discard)
@@ -81,8 +83,8 @@ func ParseRun(args []string, stdout io.Writer) (*Config, error) {
flTty := cmd.Bool("t", false, "Allocate a pseudo-tty")
flMemory := cmd.Int64("m", 0, "Memory limit (in bytes)")
if *flMemory > 0 && NO_MEMORY_LIMIT {
fmt.Fprintf(stdout, "WARNING: This version of docker has been compiled without memory limit support. Discarding -m.")
if *flMemory > 0 && !capabilities.MemoryLimit {
fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
*flMemory = 0
}
@@ -135,6 +137,12 @@ func ParseRun(args []string, stdout io.Writer) (*Config, error) {
Dns: flDns,
Image: image,
}
if *flMemory > 0 && !capabilities.SwapLimit {
fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
config.MemorySwap = -1
}
// When allocating stdin in attached mode, close stdin at client disconnect
if config.OpenStdin && config.AttachStdin {
config.StdinOnce = true
@@ -150,6 +158,16 @@ type NetworkSettings struct {
PortMapping map[string]string
}
// String returns a human-readable description of the port mapping defined in the settings
func (settings *NetworkSettings) PortMappingHuman() string {
var mapping []string
for private, public := range settings.PortMapping {
mapping = append(mapping, fmt.Sprintf("%s->%s", public, private))
}
sort.Strings(mapping)
return strings.Join(mapping, ", ")
}
func (container *Container) Cmd() *exec.Cmd {
return container.cmd
}
@@ -367,10 +385,15 @@ func (container *Container) Start() error {
return err
}
if container.Config.Memory > 0 && NO_MEMORY_LIMIT {
log.Printf("WARNING: This version of docker has been compiled without memory limit support. Discarding the limit.")
// Make sure the config is compatible with the current kernel
if container.Config.Memory > 0 && !container.runtime.capabilities.MemoryLimit {
log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
container.Config.Memory = 0
}
if container.Config.Memory > 0 && !container.runtime.capabilities.SwapLimit {
log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
container.Config.MemorySwap = -1
}
if err := container.generateLXCConfig(); err != nil {
return err
@@ -507,16 +530,42 @@ func (container *Container) releaseNetwork() {
container.NetworkSettings = &NetworkSettings{}
}
// FIXME: replace this with a control socket within docker-init
func (container *Container) waitLxc() error {
for {
if output, err := exec.Command("lxc-info", "-n", container.Id).CombinedOutput(); err != nil {
return err
} else {
if !strings.Contains(string(output), "RUNNING") {
return nil
}
}
time.Sleep(500 * time.Millisecond)
}
return nil
}
func (container *Container) monitor() {
// Wait for the program to exit
Debugf("Waiting for process")
if err := container.cmd.Wait(); err != nil {
// Discard the error as any signals or non 0 returns will generate an error
Debugf("%s: Process: %s", container.Id, err)
// If the command does not exists, try to wait via lxc
if container.cmd == nil {
if err := container.waitLxc(); err != nil {
Debugf("%s: Process: %s", container.Id, err)
}
} else {
if err := container.cmd.Wait(); err != nil {
// Discard the error as any signals or non 0 returns will generate an error
Debugf("%s: Process: %s", container.Id, err)
}
}
Debugf("Process finished")
exitCode := container.cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
var exitCode int = -1
if container.cmd != nil {
exitCode = container.cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
}
// Cleanup
container.releaseNetwork()
@@ -565,7 +614,7 @@ func (container *Container) monitor() {
}
func (container *Container) kill() error {
if !container.State.Running || container.cmd == nil {
if !container.State.Running {
return nil
}
@@ -577,6 +626,9 @@ func (container *Container) kill() error {
// 2. Wait for the process to die, in last resort, try to kill the process directly
if err := container.WaitTimeout(10 * time.Second); err != nil {
if container.cmd == nil {
return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", container.Id)
}
log.Printf("Container %s failed to exit within 10 seconds of lxc SIGKILL - trying direct SIGKILL", container.Id)
if err := container.cmd.Process.Kill(); err != nil {
return err
@@ -594,21 +646,15 @@ func (container *Container) Kill() error {
if !container.State.Running {
return nil
}
if container.State.Ghost {
return fmt.Errorf("Can't kill ghost container")
}
return container.kill()
}
func (container *Container) Stop() error {
func (container *Container) Stop(seconds int) error {
container.State.lock()
defer container.State.unlock()
if !container.State.Running {
return nil
}
if container.State.Ghost {
return fmt.Errorf("Can't stop ghot container")
}
// 1. Send a SIGTERM
if output, err := exec.Command("lxc-kill", "-n", container.Id, "15").CombinedOutput(); err != nil {
@@ -620,8 +666,8 @@ func (container *Container) Stop() error {
}
// 2. Wait for the process to exit on its own
if err := container.WaitTimeout(10 * time.Second); err != nil {
log.Printf("Container %v failed to exit within 10 seconds of SIGTERM - using the force", container.Id)
if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil {
log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.Id, seconds)
if err := container.kill(); err != nil {
return err
}
@@ -629,8 +675,8 @@ func (container *Container) Stop() error {
return nil
}
func (container *Container) Restart() error {
if err := container.Stop(); err != nil {
func (container *Container) Restart(seconds int) error {
if err := container.Stop(seconds); err != nil {
return err
}
if err := container.Start(); err != nil {

View File

@@ -97,7 +97,7 @@ func TestMultipleAttachRestart(t *testing.T) {
t.Fatalf("Unexpected output. Expected [%s], received [%s]", "hello", l3)
}
if err := container.Stop(); err != nil {
if err := container.Stop(10); err != nil {
t.Fatal(err)
}
@@ -150,6 +150,82 @@ func TestMultipleAttachRestart(t *testing.T) {
}
}
func TestDiff(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(runtime)
// Create a container and remove a file
container1, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/rm", "/etc/passwd"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container1)
if err := container1.Run(); err != nil {
t.Fatal(err)
}
// Check the changelog
c, err := container1.Changes()
if err != nil {
t.Fatal(err)
}
success := false
for _, elem := range c {
if elem.Path == "/etc/passwd" && elem.Kind == 2 {
success = true
}
}
if !success {
t.Fatalf("/etc/passwd as been removed but is not present in the diff")
}
// Commit the container
rwTar, err := container1.ExportRw()
if err != nil {
t.Error(err)
}
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image - diff", "")
if err != nil {
t.Error(err)
}
// Create a new container from the commited image
container2, err := runtime.Create(
&Config{
Image: img.Id,
Cmd: []string{"cat", "/etc/passwd"},
},
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container2)
if err := container2.Run(); err != nil {
t.Fatal(err)
}
// Check the changelog
c, err = container2.Changes()
if err != nil {
t.Fatal(err)
}
for _, elem := range c {
if elem.Path == "/etc/passwd" {
t.Fatalf("/etc/passwd should not be present in the diff after commit.")
}
}
}
func TestCommitRun(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
@@ -182,7 +258,7 @@ func TestCommitRun(t *testing.T) {
if err != nil {
t.Error(err)
}
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image")
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image", "")
if err != nil {
t.Error(err)
}

View File

@@ -96,7 +96,7 @@ def main():
else:
print "Skipping uknown op " + op
except:
docker(["rmi"] + steps)
docker(["rmi"] + steps[1:])
raise
print base

View File

@@ -0,0 +1,3 @@
# Vagrant-docker
This is a placeholder for the official vagrant-docker, a plugin for Vagrant (http://vagrantup.com) which exposes Docker as a provider.

View File

@@ -7,15 +7,16 @@ import (
"github.com/dotcloud/docker/rcli"
"github.com/dotcloud/docker/term"
"io"
"io/ioutil"
"log"
"os"
"os/signal"
"strconv"
"syscall"
)
var (
GIT_COMMIT string
NO_MEMORY_LIMIT string
GIT_COMMIT string
)
func main() {
@@ -39,15 +40,11 @@ func main() {
os.Setenv("DEBUG", "1")
}
docker.GIT_COMMIT = GIT_COMMIT
docker.NO_MEMORY_LIMIT = NO_MEMORY_LIMIT == "1"
if *flDaemon {
if flag.NArg() != 0 {
flag.Usage()
return
}
if NO_MEMORY_LIMIT == "1" {
log.Printf("WARNING: This version of docker has been compiled without memory limit support.")
}
if err := daemon(*pidfile); err != nil {
log.Fatal(err)
}
@@ -59,8 +56,13 @@ func main() {
}
func createPidFile(pidfile string) error {
if _, err := os.Stat(pidfile); err == nil {
return fmt.Errorf("pid file found, ensure docker is not running or delete %s", pidfile)
if pidString, err := ioutil.ReadFile(pidfile); err == nil {
pid, err := strconv.Atoi(string(pidString))
if err == nil {
if _, err := os.Stat(fmt.Sprintf("/proc/%d/", pid)); err == nil {
return fmt.Errorf("pid file found, ensure docker is not running or delete %s", pidfile)
}
}
}
file, err := os.Create(pidfile)

View File

@@ -51,6 +51,7 @@ docs:
cp sources/dotcloud.yml $(BUILDDIR)/html/
cp sources/CNAME $(BUILDDIR)/html/
cp sources/.nojekyll $(BUILDDIR)/html/
cp sources/nginx.conf $(BUILDDIR)/html/
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

View File

@@ -10,7 +10,7 @@ Building blocks
Images
------
An original container image. These are stored on disk and are comparable with what you normally expect from a stoppped virtual machine image. Images are stored (and retrieved from) repository
An original container image. These are stored on disk and are comparable with what you normally expect from a stopped virtual machine image. Images are stored (and retrieved from) repository
Images are stored on your local file system under /var/lib/docker/images

View File

@@ -49,7 +49,7 @@ Save the changed we just made in the container to a new image called "_/builds/g
WEB_WORKER=$(docker run -d -p 5000 $BUILD_IMG /usr/local/bin/runapp)
- **"docker run -d "** run a command in a new container. We pass "-d" so it runs as a daemon.
**"-p 5000"* the web app is going to listen on this port, so it must be mapped from the container to the host system.
- **"-p 5000"** the web app is going to listen on this port, so it must be mapped from the container to the host system.
- **"$BUILD_IMG"** is the image we want to run the command inside of.
- **/usr/local/bin/runapp** is the command which starts the web app.

View File

@@ -7,27 +7,16 @@
Running The Examples
--------------------
There are two ways to run docker, daemon mode and standalone mode.
When you run the docker command it will first check if there is a docker daemon running in the background it can connect to.
* If it exists it will use that daemon to run all of the commands.
* If it does not exist docker will run in standalone mode (docker will exit after each command).
Docker needs to be run from a privileged account (root).
1. The most common (and recommended) way is to run a docker daemon as root in the background, and then connect to it from the docker client from any account.
All the examples assume your machine is running the docker daemon. To run the docker daemon in the background, simply type:
.. code-block:: bash
# starting docker daemon in the background
sudo docker -d &
# now you can run docker commands from any account.
docker <command>
2. Standalone: You need to run every command as root, or using sudo
Now you can run docker in client mode: all commands will be forwarded to the docker daemon, so the client
can run from any account.
.. code-block:: bash
sudo docker <command>
# now you can run docker commands from any account.
docker help

View File

@@ -71,34 +71,40 @@
<h2>
<a name="installing-on-ubuntu-1204-and-1210" class="anchor" href="#installing-on-ubuntu-1204-and-1210"><span class="mini-icon mini-icon-link"></span>
</a>Installing on Ubuntu</h2>
<p><strong>Requirements</strong></p>
<ul>
<li>Ubuntu 12.04 (LTS) (64-bit)</li>
<li> or Ubuntu 12.10 (quantal) (64-bit)</li>
</ul>
<ol>
<li>
<p>Install dependencies:</p>
<p><strong>Install dependencies</strong></p>
The linux-image-extra package is only needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
<pre>sudo apt-get install linux-image-extra-`uname -r`</pre>
<div class="highlight">
<pre>sudo apt-get install lxc wget bsdtar curl</pre>
<pre>sudo apt-get install linux-image-extra-<span class="sb">`</span>uname -r<span class="sb">`</span></pre></div>
<p>The <code>linux-image-extra</code> package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.</p>
</li>
<li>
<p>Install the latest docker binary:</p>
<p><strong>Install Docker</strong></p>
<p>Add the Ubuntu PPA (Personal Package Archive) sources to your apt sources list, update and install.</p>
<p>You may see some warnings that the GPG keys cannot be verified.</p>
<div class="highlight">
<pre>sudo sh -c "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >> /etc/apt/sources.list"</pre>
<pre>sudo apt-get update</pre>
<pre>sudo apt-get install lxc-docker</pre>
</div>
</li>
<li>
<p><strong>Run!</strong></p>
<div class="highlight">
<pre>wget http://get.docker.io/builds/<span class="k">$(</span>uname -s<span class="k">)</span>/<span class="k">$(</span>uname -m<span class="k">)</span>/docker-master.tgz</pre>
<pre>tar -xf docker-master.tgz</pre>
<pre>docker run -i -t ubuntu /bin/bash</pre>
</div>
</li>
<li>
<p>Run your first container!</p>
<div class="highlight"><pre><span class="nb">cd </span>docker-master</pre>
<pre>sudo ./docker run -i -t base /bin/bash</pre>
</div>
<p>Done!</p>
<p>Consider adding docker to your <code>PATH</code> for simplicity.</p>
</li>
Continue with the <a href="http://docs.docker.io/en/latest/examples/hello_world/">Hello world</a> example.
</ol>
</section>
@@ -117,7 +123,7 @@
vagrant and an Ubuntu virtual machine.</strong></p>
<ul>
<li><a href="http://docs.docker.io/en/latest/installation/macos/">Mac OS X and other linuxes</a></li>
<li><a href="http://docs.docker.io/en/latest/installation/vagrant/">Mac OS X and other linuxes</a></li>
<li><a href="http://docs.docker.io/en/latest/installation/windows/">Windows</a></li>
</ul>

View File

@@ -1,8 +1,9 @@
Amazon EC2
==========
Please note this is a community contributed installation path. The only 'official' installation is using the :ref:`ubuntu_linux` installation path. This version
may be out of date because it depends on some binaries to be updated and published
Please note this is a community contributed installation path. The only 'official' installation is using the
:ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
Installation
------------
@@ -17,7 +18,7 @@ Docker can now be installed on Amazon EC2 with a single vagrant command. Vagrant
vagrant plugin install vagrant-aws
3. Get the docker sources, this will give you the latest Vagrantfile and puppet manifests.
3. Get the docker sources, this will give you the latest Vagrantfile.
::

View File

@@ -3,19 +3,23 @@
Arch Linux
==========
Please note this is a community contributed installation path. The only 'official' installation is using the
:ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
Installing on Arch Linux is not officially supported but can be handled via
either of the following AUR packages:
* `dotcloud-docker <https://aur.archlinux.org/packages/dotcloud-docker/>`_
* `dotcloud-docker-git <https://aur.archlinux.org/packages/dotcloud-docker-git/>`_
* `lxc-docker <https://aur.archlinux.org/packages/lxc-docker/>`_
* `lxc-docker-git <https://aur.archlinux.org/packages/lxc-docker-git/>`_
The dotcloud-docker package will install the latest tagged version of docker.
The dotcloud-docker-git package will build from the current master branch.
The lxc-docker package will install the latest tagged version of docker.
The lxc-docker-git package will build from the current master branch.
Dependencies
------------
Docker depends on several packages which will be installed automatically with
Docker depends on several packages which are specified as dependencies in
either AUR package.
* aufs3
@@ -23,6 +27,7 @@ either AUR package.
* go
* iproute2
* linux-aufs_friendly
* lxc
Installation
------------
@@ -37,7 +42,24 @@ new kernel will be compiled and this can take quite a while.
::
yaourt -S dotcloud-docker-git
yaourt -S lxc-docker-git
Starting Docker
---------------
Prior to starting docker modify your bootloader to use the
**linux-aufs_friendly** kernel and reboot your system.
There is a systemd service unit created for docker. To start the docker service:
::
sudo systemctl start docker
To start on system boot:
::
sudo systemctl enable docker

View File

@@ -0,0 +1,53 @@
.. _binaries:
Binaries
========
**Please note this project is currently under heavy development. It should not be used in production.**
Right now, the officially supported distributions are:
- Ubuntu 12.04 (precise LTS) (64-bit)
- Ubuntu 12.10 (quantal) (64-bit)
Install dependencies:
---------------------
::
sudo apt-get install lxc bsdtar
sudo apt-get install linux-image-extra-`uname -r`
The linux-image-extra package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
Install the docker binary:
::
wget http://get.docker.io/builds/Linux/x86_64/docker-master.tgz
tar -xf docker-master.tgz
sudo cp ./docker-master /usr/local/bin
Note: docker currently only supports 64-bit Linux hosts.
Run the docker daemon
---------------------
::
sudo docker -d &
Run your first container!
-------------------------
::
docker run -i -t ubuntu /bin/bash
Continue with the :ref:`hello_world` example.

View File

@@ -13,8 +13,9 @@ Contents:
:maxdepth: 1
ubuntulinux
binaries
archlinux
macos
vagrant
windows
amazon
upgrading

View File

@@ -1,66 +0,0 @@
Mac OS X and other linux
========================
Please note this is a community contributed installation path. The only 'official' installation is using the :ref:`ubuntu_linux` installation path. This version
may be out of date because it depends on some binaries to be updated and published
Requirements
------------
We currently rely on some Ubuntu-linux specific packages, this will change in the future, but for now we provide a
streamlined path to install Virtualbox with a Ubuntu 12.10 image using Vagrant.
1. Install virtualbox from https://www.virtualbox.org/ (or use your package manager)
2. Install vagrant from http://www.vagrantup.com/ (or use your package manager)
3. Install git if you had not installed it before, check if it is installed by running
``git`` in a terminal window
We recommend having at least about 2Gb of free disk space and 2Gb RAM (or more).
Installation
------------
1. Fetch the docker sources
.. code-block:: bash
git clone https://github.com/dotcloud/docker.git
2. Run vagrant from the sources directory
.. code-block:: bash
vagrant up
Vagrant will:
* Download the Quantal64 base ubuntu virtual machine image from get.docker.io/
* Boot this image in virtualbox
Then it will use Puppet to perform an initial setup in this machine:
* Download & untar the most recent docker binary tarball to vagrant homedir.
* Debootstrap to /var/lib/docker/images/ubuntu.
* Install & run dockerd as service.
* Put docker in /usr/local/bin.
* Put latest Go toolchain in /usr/local/go.
You now have a Ubuntu Virtual Machine running with docker pre-installed.
To access the VM and use Docker, Run ``vagrant ssh`` from the same directory as where you ran
``vagrant up``. Vagrant will make sure to connect you to the correct VM.
.. code-block:: bash
vagrant ssh
Now you are in the VM, run docker
.. code-block:: bash
docker
Continue with the :ref:`hello_world` example.

View File

@@ -6,51 +6,56 @@ Ubuntu Linux
**Please note this project is currently under heavy development. It should not be used in production.**
Installing on Ubuntu 12.04 and 12.10
Right now, the officially supported distributions are:
Ubuntu 12.04 (precise LTS)
Ubuntu 12.10 (quantal)
Docker probably works on other distributions featuring a recent kernel, the AUFS patch, and up-to-date lxc. However this has not been tested.
- Ubuntu 12.04 (precise LTS) (64-bit)
- Ubuntu 12.10 (quantal) (64-bit)
Install dependencies:
---------------------
Dependencies
------------
::
The linux-image-extra package is only needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
sudo apt-get install lxc wget bsdtar curl
sudo apt-get install linux-image-extra-`uname -r`
.. code-block:: bash
The linux-image-extra package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
Install the latest docker binary:
::
wget http://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-master.tgz
tar -xf docker-master.tgz
Run your first container!
::
cd docker-master
::
sudo ./docker run -i -t base /bin/bash
sudo apt-get install linux-image-extra-`uname -r`
To run docker as a daemon, in the background, and allow non-root users to run ``docker`` start
docker -d
Installation
------------
::
sudo ./docker -d &
Docker is available as a Ubuntu PPA (Personal Package Archive),
`hosted on launchpad <https://launchpad.net/~dotcloud/+archive/lxc-docker>`_
which makes installing Docker on Ubuntu very easy.
Consider adding docker to your PATH for simplicity.
Continue with the :ref:`hello_world` example.
Add the custom package sources to your apt sources list. Copy and paste the following lines at once.
.. code-block:: bash
sudo sh -c "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >> /etc/apt/sources.list"
Update your sources. You will see a warning that GPG signatures cannot be verified.
.. code-block:: bash
sudo apt-get update
Now install it. You will see another warning that the package cannot be authenticated. Confirm install.
.. code-block:: bash
sudo apt-get install lxc-docker
Verify it worked
.. code-block:: bash
docker
**Done!** Now continue with the :ref:`hello_world` example.

View File

@@ -3,7 +3,8 @@
Upgrading
============
We assume you are upgrading from within the operating system which runs your docker daemon.
These instructions are for upgrading your Docker binary when you have a custom (non-package-manager) installation.
If you installed docker using apt-get, use that to upgrade.
Get the latest docker binary:
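(A hedged sketch of both upgrade paths, using the same commands that appear elsewhere in these installation docs.)::

    # If you installed from the PPA, upgrade through apt-get:
    sudo apt-get update
    sudo apt-get install lxc-docker

    # For a custom binary installation, fetch and unpack the latest binary,
    # then copy it over wherever your original installation lives:
    wget http://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-master.tgz
    tar -xf docker-master.tgz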

View File

@@ -0,0 +1,70 @@
.. _install_using_vagrant:
Using Vagrant
=============
Please note this is a community contributed installation path. The only 'official' installation is using the
:ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
**Requirements:**
This guide will set up a new virtual machine with docker installed on your computer. This works on most operating
systems, including Mac OS X, Windows, Linux, FreeBSD and others. If you can install these tools and have at least
400Mb of RAM to spare, you should be good.
Install Vagrant and Virtualbox
------------------------------
1. Install virtualbox from https://www.virtualbox.org/ (or use your package manager)
2. Install vagrant from http://www.vagrantup.com/ (or use your package manager)
3. Install git if you have not installed it already; check whether it is installed by running
``git`` in a terminal window
Spin it up
----------
1. Fetch the docker sources (this includes the Vagrantfile for machine setup).
.. code-block:: bash
git clone https://github.com/dotcloud/docker.git
2. Run vagrant from the sources directory
.. code-block:: bash
vagrant up
Vagrant will:
* Download the 'official' Precise64 base ubuntu virtual machine image from vagrantup.com
* Boot this image in virtualbox
* Add the `Docker PPA sources <https://launchpad.net/~dotcloud/+archive/lxc-docker>`_ to /etc/apt/sources.list
* Update your sources
* Install lxc-docker
You now have a Ubuntu Virtual Machine running with docker pre-installed.
Connect
-------
To access the VM and use Docker, run ``vagrant ssh`` from the same directory where you ran
``vagrant up``. Vagrant will connect you to the correct VM.
.. code-block:: bash
vagrant ssh
Run
-----
Now that you are in the VM, run docker
.. code-block:: bash
docker
Continue with the :ref:`hello_world` example.

View File

@@ -3,8 +3,8 @@
:keywords: Docker, Docker documentation, Windows, requirements, virtualbox, vagrant, git, ssh, putty, cygwin
Windows
=========
Windows (with Vagrant)
======================
Please note this is a community contributed installation path. The only 'official' installation is using the :ref:`ubuntu_linux` installation path. This version
may be out of date because it depends on some binaries to be updated and published

docs/sources/nginx.conf Normal file
View File

@@ -0,0 +1,6 @@
# rule to redirect original links created when hosted on github pages
rewrite ^/documentation/(.*).html http://docs.docker.io/en/latest/$1/ permanent;
# rewrite the stuff which was on the current page
rewrite ^/gettingstarted.html$ /gettingstarted/ permanent;

View File

@@ -0,0 +1,9 @@
package docker
import (
"fmt"
)
func getKernelVersion() (*KernelVersionInfo, error) {
return nil, fmt.Errorf("Kernel version detection is not available on darwin")
}

getKernelVersion_linux.go Normal file
View File

@@ -0,0 +1,69 @@
package docker
import (
"bytes"
"strconv"
"strings"
"syscall"
)
func getKernelVersion() (*KernelVersionInfo, error) {
var (
uts syscall.Utsname
flavor string
kernel, major, minor int
err error
)
if err := syscall.Uname(&uts); err != nil {
return nil, err
}
release := make([]byte, len(uts.Release))
i := 0
for _, c := range uts.Release {
release[i] = byte(c)
i++
}
// Remove the \x00 from the release for Atoi to parse correctly
release = release[:bytes.IndexByte(release, 0)]
tmp := strings.SplitN(string(release), "-", 2)
tmp2 := strings.SplitN(tmp[0], ".", 3)
if len(tmp2) > 0 {
kernel, err = strconv.Atoi(tmp2[0])
if err != nil {
return nil, err
}
}
if len(tmp2) > 1 {
major, err = strconv.Atoi(tmp2[1])
if err != nil {
return nil, err
}
}
if len(tmp2) > 2 {
minor, err = strconv.Atoi(tmp2[2])
if err != nil {
return nil, err
}
}
if len(tmp) == 2 {
flavor = tmp[1]
} else {
flavor = ""
}
return &KernelVersionInfo{
Kernel: kernel,
Major: major,
Minor: minor,
Flavor: flavor,
}, nil
}

View File

@@ -2,6 +2,7 @@ package docker
import (
"fmt"
"io"
"io/ioutil"
"os"
"path"
@@ -83,12 +84,13 @@ func (graph *Graph) Get(name string) (*Image, error) {
}
// Create creates a new image and registers it in the graph.
func (graph *Graph) Create(layerData Archive, container *Container, comment string) (*Image, error) {
func (graph *Graph) Create(layerData Archive, container *Container, comment, author string) (*Image, error) {
img := &Image{
Id: GenerateId(),
Comment: comment,
Created: time.Now(),
DockerVersion: VERSION,
Author: author,
}
if container != nil {
img.Parent = container.Image
@@ -128,12 +130,32 @@ func (graph *Graph) Register(layerData Archive, img *Image) error {
return nil
}
// TempLayerArchive creates a temporary archive of the given image's filesystem layer.
// The archive is stored on disk and will be automatically deleted as soon as it has been read.
// If output is not nil, a human-readable progress bar will be written to it.
// FIXME: does this belong in Graph? How about MktempFile, let the caller use it for archives?
func (graph *Graph) TempLayerArchive(id string, compression Compression, output io.Writer) (*TempArchive, error) {
image, err := graph.Get(id)
if err != nil {
return nil, err
}
tmp, err := graph.tmp()
if err != nil {
return nil, err
}
archive, err := image.TarLayer(compression)
if err != nil {
return nil, err
}
return NewTempArchive(ProgressReader(ioutil.NopCloser(archive), 0, output, "Buffering to disk %v/%v (%v)"), tmp.Root)
}
// Mktemp creates a temporary sub-directory inside the graph's filesystem.
func (graph *Graph) Mktemp(id string) (string, error) {
if id == "" {
id = GenerateId()
}
tmp, err := NewGraph(path.Join(graph.Root, ":tmp:"))
tmp, err := graph.tmp()
if err != nil {
return "", fmt.Errorf("Couldn't create temp: %s", err)
}
@@ -143,6 +165,10 @@ func (graph *Graph) Mktemp(id string) (string, error) {
return tmp.imageRoot(id), nil
}
func (graph *Graph) tmp() (*Graph, error) {
return NewGraph(path.Join(graph.Root, ":tmp:"))
}
// Check if given error is "not empty".
// Note: this is the way golang does it internally with os.IsNotExists.
func isNotEmpty(err error) bool {

View File

@@ -62,7 +62,7 @@ func TestGraphCreate(t *testing.T) {
if err != nil {
t.Fatal(err)
}
image, err := graph.Create(archive, nil, "Testing")
image, err := graph.Create(archive, nil, "Testing", "")
if err != nil {
t.Fatal(err)
}
@@ -122,7 +122,7 @@ func TestMount(t *testing.T) {
if err != nil {
t.Fatal(err)
}
image, err := graph.Create(archive, nil, "Testing")
image, err := graph.Create(archive, nil, "Testing", "")
if err != nil {
t.Fatal(err)
}
@@ -166,7 +166,7 @@ func createTestImage(graph *Graph, t *testing.T) *Image {
if err != nil {
t.Fatal(err)
}
img, err := graph.Create(archive, nil, "Test image")
img, err := graph.Create(archive, nil, "Test image", "")
if err != nil {
t.Fatal(err)
}
@@ -181,7 +181,7 @@ func TestDelete(t *testing.T) {
t.Fatal(err)
}
assertNImages(graph, t, 0)
img, err := graph.Create(archive, nil, "Bla bla")
img, err := graph.Create(archive, nil, "Bla bla", "")
if err != nil {
t.Fatal(err)
}
@@ -192,11 +192,11 @@ func TestDelete(t *testing.T) {
assertNImages(graph, t, 0)
// Test 2 create (same name) / 1 delete
img1, err := graph.Create(archive, nil, "Testing")
img1, err := graph.Create(archive, nil, "Testing", "")
if err != nil {
t.Fatal(err)
}
if _, err = graph.Create(archive, nil, "Testing"); err != nil {
if _, err = graph.Create(archive, nil, "Testing", ""); err != nil {
t.Fatal(err)
}
assertNImages(graph, t, 2)

View File

@@ -0,0 +1,17 @@
# This will build a container capable of producing an official binary build of docker and
# uploading it to S3
from ubuntu:12.10
run apt-get update
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q s3cmd
# Packages required to checkout and build docker
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q git
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q build-essential
# Packages required to build an ubuntu package
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q debhelper
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q autotools-dev
copy fake_initctl /usr/local/bin/initctl
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q devscripts
copy dockerbuilder /usr/local/bin/dockerbuilder
copy s3cfg /.s3cfg
# run $img dockerbuilder $REVISION_OR_TAG $S3_ID $S3_KEY

View File

@@ -0,0 +1,53 @@
#!/bin/sh
set -x
set -e
PACKAGE=github.com/dotcloud/docker
if [ $# -gt 1 ]; then
echo "Usage: $0 [REVISION]"
exit 1
fi
export REVISION=$1
if [ -z "$AWS_ID" ]; then
echo "Warning: environment variable AWS_ID is not set. Won't upload to S3."
NO_S3=1
fi
if [ -z "$AWS_KEY" ]; then
echo "Warning: environment variable AWS_KEY is not set. Won't upload to S3."
NO_S3=1
fi
if [ -z "$GPG_KEY" ]; then
echo "Warning: environment variable GPG_KEY is not set. Ubuntu package upload will not succeed."
NO_UBUNTU=1
fi
if [ -z "$REVISION" ]; then
rm -fr docker-master
git clone https://github.com/dotcloud/docker docker-master
cd docker-master
else
rm -fr docker-$REVISION
git init docker-$REVISION
cd docker-$REVISION
git fetch -t https://github.com/dotcloud/docker $REVISION
git reset --hard FETCH_HEAD
fi
if [ -z "$REVISION" ]; then
make release
else
make release RELEASE_VERSION=$REVISION
fi
if [ -z "$NO_S3" ]; then
s3cmd -P put docker-$REVISION.tgz s3://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-$REVISION.tgz
fi
if [ -z "$NO_UBUNTU" ]; then
(cd packaging/ubuntu && make ubuntu)
fi
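(A hedged invocation sketch based on the environment checks at the top of this script; the credential values are placeholders.)::

    # Build and upload the v0.2.0 tag. Without AWS_ID/AWS_KEY the S3 upload is
    # skipped; without GPG_KEY the Ubuntu package upload is skipped.
    export AWS_ID="your-s3-access-key"
    export AWS_KEY="your-s3-secret-key"
    export GPG_KEY="$(cat /path/to/ppa-private-key.asc)"   # hypothetical key file
    ./dockerbuilder v0.2.0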

View File

@@ -0,0 +1,3 @@
#!/bin/sh
echo Whatever you say, man

hack/dockerbuilder/s3cfg Normal file
View File

@@ -0,0 +1,3 @@
[default]
access_key = $AWS_ID
secret_key = $AWS_KEY

View File

@@ -23,6 +23,7 @@ type Image struct {
Container string `json:"container,omitempty"`
ContainerConfig Config `json:"container_config,omitempty"`
DockerVersion string `json:"docker_version,omitempty"`
Author string `json:"author,omitempty"`
graph *Graph
}
@@ -91,7 +92,7 @@ func MountAUFS(ro []string, rw string, target string) error {
rwBranch := fmt.Sprintf("%v=rw", rw)
roBranches := ""
for _, layer := range ro {
roBranches += fmt.Sprintf("%v=ro:", layer)
roBranches += fmt.Sprintf("%v=ro+wh:", layer)
}
branches := fmt.Sprintf("br:%v:%v", rwBranch, roBranches)
@@ -109,6 +110,15 @@ func MountAUFS(ro []string, rw string, target string) error {
return nil
}
// TarLayer returns a tar archive of the image's filesystem layer.
func (image *Image) TarLayer(compression Compression) (Archive, error) {
layerPath, err := image.layer()
if err != nil {
return nil, err
}
return Tar(layerPath, compression)
}
func (image *Image) Mount(root, rw string) error {
if mounted, err := Mounted(root); err != nil {
return err
@@ -126,34 +136,9 @@ func (image *Image) Mount(root, rw string) error {
if err := os.Mkdir(rw, 0755); err != nil && !os.IsExist(err) {
return err
}
// FIXME: @creack shouldn't we do this after going over changes?
if err := MountAUFS(layers, rw, root); err != nil {
return err
}
// FIXME: Create tests for deletion
// FIXME: move this part to change.go
// Retrieve the changeset from the parent and apply it to the container
// - Retrieve the changes
changes, err := Changes(layers, layers[0])
if err != nil {
return err
}
// Iterate on changes
for _, c := range changes {
// If there is a delete
if c.Kind == ChangeDelete {
// Make sure the directory exists
file_path, file_name := path.Dir(c.Path), path.Base(c.Path)
if err := os.MkdirAll(path.Join(rw, file_path), 0755); err != nil {
return err
}
// And create the whiteout (we just need to create empty file, discard the return)
if _, err := os.Create(path.Join(path.Join(rw, file_path),
".wh."+path.Base(file_name))); err != nil {
return err
}
}
}
return nil
}

View File

@@ -4,6 +4,7 @@ import (
"encoding/binary"
"errors"
"fmt"
"io"
"log"
"net"
"os/exec"
@@ -183,18 +184,21 @@ func getIfaceAddr(name string) (net.Addr, error) {
// It keeps track of all mappings and is able to unmap at will
type PortMapper struct {
mapping map[int]net.TCPAddr
proxies map[int]net.Listener
}
func (mapper *PortMapper) cleanup() error {
// Ignore errors - This could mean the chains were never set up
iptables("-t", "nat", "-D", "PREROUTING", "-m", "addrtype", "--dst-type", "LOCAL", "-j", "DOCKER")
iptables("-t", "nat", "-D", "OUTPUT", "-m", "addrtype", "--dst-type", "LOCAL", "-j", "DOCKER")
iptables("-t", "nat", "-D", "OUTPUT", "-m", "addrtype", "--dst-type", "LOCAL", "!", "--dst", "127.0.0.0/8", "-j", "DOCKER")
iptables("-t", "nat", "-D", "OUTPUT", "-m", "addrtype", "--dst-type", "LOCAL", "-j", "DOCKER") // Created in versions <= 0.1.6
// Also cleanup rules created by older versions, or -X might fail.
iptables("-t", "nat", "-D", "PREROUTING", "-j", "DOCKER")
iptables("-t", "nat", "-D", "OUTPUT", "-j", "DOCKER")
iptables("-t", "nat", "-F", "DOCKER")
iptables("-t", "nat", "-X", "DOCKER")
mapper.mapping = make(map[int]net.TCPAddr)
mapper.proxies = make(map[int]net.Listener)
return nil
}
@@ -205,7 +209,7 @@ func (mapper *PortMapper) setup() error {
if err := iptables("-t", "nat", "-A", "PREROUTING", "-m", "addrtype", "--dst-type", "LOCAL", "-j", "DOCKER"); err != nil {
return fmt.Errorf("Failed to inject docker in PREROUTING chain: %s", err)
}
if err := iptables("-t", "nat", "-A", "OUTPUT", "-m", "addrtype", "--dst-type", "LOCAL", "-j", "DOCKER"); err != nil {
if err := iptables("-t", "nat", "-A", "OUTPUT", "-m", "addrtype", "--dst-type", "LOCAL", "!", "--dst", "127.0.0.0/8", "-j", "DOCKER"); err != nil {
return fmt.Errorf("Failed to inject docker in OUTPUT chain: %s", err)
}
return nil
@@ -220,15 +224,64 @@ func (mapper *PortMapper) Map(port int, dest net.TCPAddr) error {
if err := mapper.iptablesForward("-A", port, dest); err != nil {
return err
}
mapper.mapping[port] = dest
listener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", port))
if err != nil {
mapper.Unmap(port)
return err
}
mapper.proxies[port] = listener
go proxy(listener, "tcp", dest.String())
return nil
}
// proxy listens for socket connections on `listener`, and forwards them unmodified
// to `proto:address`
func proxy(listener net.Listener, proto, address string) error {
Debugf("proxying to %s:%s", proto, address)
defer Debugf("Done proxying to %s:%s", proto, address)
for {
Debugf("Listening on %s", listener)
src, err := listener.Accept()
if err != nil {
return err
}
Debugf("Connecting to %s:%s", proto, address)
dst, err := net.Dial(proto, address)
if err != nil {
log.Printf("Error connecting to %s:%s: %s", proto, address, err)
src.Close()
continue
}
Debugf("Connected to backend, splicing")
splice(src, dst)
}
return nil
}
func halfSplice(dst, src net.Conn) error {
_, err := io.Copy(dst, src)
// FIXME: on EOF from a tcp connection, pass WriteClose()
dst.Close()
src.Close()
return err
}
func splice(a, b net.Conn) {
go halfSplice(a, b)
go halfSplice(b, a)
}
func (mapper *PortMapper) Unmap(port int) error {
dest, ok := mapper.mapping[port]
if !ok {
return errors.New("Port is not mapped")
}
if proxy, exists := mapper.proxies[port]; exists {
proxy.Close()
delete(mapper.proxies, port)
}
if err := mapper.iptablesForward("-D", port, dest); err != nil {
return err
}

View File

@@ -0,0 +1,25 @@
Docker on Arch
==============
The AUR lxc-docker and lxc-docker-git packages handle building docker on Arch
Linux. The PKGBUILD specifies all dependencies, build, and packaging steps.
Dependencies
============
The only build-time dependencies are git and go, which are available via pacman.
The -s flag can be used on makepkg commands below to automatically install
these dependencies.
Building Package
================
Download the tarball for either AUR package to a local directory. In that
directory makepkg can be run to build the package.
# Build the binary package
makepkg
# Build an updated source tarball
makepkg --source
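(A hedged sketch of the full build-and-install flow, using makepkg's -s flag mentioned above; the package file name is indicative.)::

    # In the directory containing the downloaded PKGBUILD
    makepkg -s                                # -s installs the git/go build dependencies via pacman
    sudo pacman -U lxc-docker-*.pkg.tar.xz    # install the package that makepkg produced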

View File

@@ -1,73 +1,62 @@
# Ubuntu package Makefile
#
# Dependencies: debhelper autotools-dev devscripts golang
# Notes:
# Use 'make ubuntu' to create the ubuntu package
# The GPG_KEY environment variable needs to contain a GPG private key for the package to be signed
# and uploaded to the docker PPA.
# If GPG_KEY is not defined, 'make ubuntu' will create the docker package and exit with
# status code 2
PKG_NAME=lxc-docker
PKG_ARCH=amd64
PKG_VERSION=1
ROOT_PATH:=$(PWD)
BUILD_PATH=build # Do not change, decided by dpkg-buildpackage
BUILD_SRC=build_src
GITHUB_PATH=src/github.com/dotcloud/docker
INSDIR=usr/bin
SOURCE_PACKAGE=$(PKG_NAME)_$(PKG_VERSION).orig.tar.gz
DEB_PACKAGE=$(PKG_NAME)_$(PKG_VERSION)_$(PKG_ARCH).deb
EXTRA_GO_PKG=./auth
VERSION=$(shell head -1 changelog | sed 's/^.\+(\(.\+\)..).\+$$/\1/')
GITHUB_PATH=github.com/dotcloud/docker
DOCKER_VERSION=${PKG_NAME}_${VERSION}
DOCKER_FVERSION=${PKG_NAME}_$(shell head -1 changelog | sed 's/^.\+(\(.\+\)).\+$$/\1/')
BUILD_SRC=${CURDIR}/../../build_src
VERSION_TAG=v$(shell head -1 changelog | sed 's/^.\+(\(.\+\)-[0-9]\+).\+$$/\1/')
TMPDIR=$(shell mktemp -d -t XXXXXX)
all:
# Compile docker. Used by dpkg-buildpackage.
cd src/${GITHUB_PATH}/docker; GOPATH=${CURDIR} go build
# Build a debian source package
all: clean build_in_deb
build_in_deb:
echo "GOPATH = " $(ROOT_PATH)
mkdir bin
cd $(GITHUB_PATH)/docker; GOPATH=$(ROOT_PATH) go build -o $(ROOT_PATH)/bin/docker
# DESTDIR provided by Debian packaging
install:
# Call this from a go environment (as packaged for deb source package)
mkdir -p $(DESTDIR)/$(INSDIR)
mkdir -p $(DESTDIR)/etc/init
install -m 0755 bin/docker $(DESTDIR)/$(INSDIR)
install -o root -m 0755 etc/docker.upstart $(DESTDIR)/etc/init/docker.conf
# Used by dpkg-buildpackage
mkdir -p ${DESTDIR}/usr/bin
mkdir -p ${DESTDIR}/etc/init
mkdir -p ${DESTDIR}/DEBIAN
install -m 0755 src/${GITHUB_PATH}/docker/docker ${DESTDIR}/usr/bin
install -o root -m 0755 debian/docker.upstart ${DESTDIR}/etc/init/docker.conf
install debian/lxc-docker.prerm ${DESTDIR}/DEBIAN/prerm
install debian/lxc-docker.postinst ${DESTDIR}/DEBIAN/postinst
$(BUILD_SRC): clean
# Copy ourselves into $BUILD_SRC to comply with unusual golang constraints
tar --exclude=*.tar.gz --exclude=checkout.tgz -f checkout.tgz -cz *
mkdir -p $(BUILD_SRC)/$(GITHUB_PATH)
tar -f checkout.tgz -C $(BUILD_SRC)/$(GITHUB_PATH) -xz
cd $(BUILD_SRC)/$(GITHUB_PATH)/docker; GOPATH=$(ROOT_PATH)/$(BUILD_SRC) go get -d
for d in `find $(BUILD_SRC) -name '.git*'`; do rm -rf $$d; done
# Populate source build with debian stuff
cp -R -L ./deb/* $(BUILD_SRC)
$(SOURCE_PACKAGE): $(BUILD_SRC)
rm -f $(SOURCE_PACKAGE)
# Create the debian source package
tar -f $(SOURCE_PACKAGE) -C ${ROOT_PATH}/${BUILD_SRC} -cz .
# Build deb package fetching go dependencies and cleaning up git repositories
deb: $(DEB_PACKAGE)
$(DEB_PACKAGE): $(SOURCE_PACKAGE)
# dpkg-buildpackage looks for source package tarball in ../
cd $(BUILD_SRC); dpkg-buildpackage
rm -rf $(BUILD_PATH) debian/$(PKG_NAME)* debian/files
debsrc: $(SOURCE_PACKAGE)
# Build local sources
#$(PKG_NAME): build_local
build_local:
-@mkdir -p bin
cd docker && go build -o ../bin/docker
gotest:
@echo "\033[36m[Testing]\033[00m docker..."
@sudo -E GOPATH=$(ROOT_PATH)/$(BUILD_SRC) go test -v . $(EXTRA_GO_PKG) && \
echo -n "\033[32m[OK]\033[00m" || \
echo -n "\033[31m[FAIL]\033[00m"; \
echo " docker"
@sudo rm -rf /tmp/docker-*
clean:
rm -rf $(BUILD_PATH) debian/$(PKG_NAME)* debian/files $(BUILD_SRC) checkout.tgz bin
ubuntu:
# This Makefile will compile the github master branch of dotcloud/docker
# Retrieve docker project and its go structure from internet
rm -rf ${BUILD_SRC}
git clone $(shell git rev-parse --show-toplevel) ${BUILD_SRC}/${GITHUB_PATH}
cd ${BUILD_SRC}/${GITHUB_PATH}; git checkout ${VERSION_TAG} && GOPATH=${BUILD_SRC} go get -d
# Add debianization
mkdir ${BUILD_SRC}/debian
cp Makefile ${BUILD_SRC}
cp -r * ${BUILD_SRC}/debian
cp ../../README.md ${BUILD_SRC}
# Cleanup
for d in `find ${BUILD_SRC} -name '.git*'`; do rm -rf $$d; done
rm -rf ${BUILD_SRC}/../${DOCKER_VERSION}.orig.tar.gz
rm -rf ${BUILD_SRC}/pkg
# Create docker debian files
cd ${BUILD_SRC}; tar czf ../${DOCKER_VERSION}.orig.tar.gz .
cd ${BUILD_SRC}; dpkg-buildpackage -us -uc
rm -rf ${BUILD_SRC}
# Sign package and upload it to PPA if GPG_KEY environment variable
# holds a private GPG KEY
if /usr/bin/test "$${GPG_KEY}" == ""; then exit 2; fi
mkdir ${BUILD_SRC}
# Import gpg signing key
echo "$${GPG_KEY}" | gpg --allow-secret-key-import --import
# Sign the package
cd ${BUILD_SRC}; dpkg-source -x ${BUILD_SRC}/../${DOCKER_FVERSION}.dsc
cd ${BUILD_SRC}/${PKG_NAME}-${VERSION}; debuild -S -sa
cd ${BUILD_SRC};dput ppa:dotcloud/lxc-docker ${DOCKER_FVERSION}_source.changes
rm -rf ${BUILD_SRC}
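(To make the GPG_KEY note at the top of this Makefile concrete, a hedged usage sketch; the key-file path is a placeholder.)::

    # Developer: build the package; the Makefile exits with status 2 before signing
    (cd packaging/ubuntu && make ubuntu)

    # Maintainer: with a GPG private key in the environment, the source package is
    # signed and uploaded to the dotcloud PPA
    export GPG_KEY="$(cat ~/keys/ppa-private-key.asc)"
    (cd packaging/ubuntu && make ubuntu)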

View File

@@ -0,0 +1,37 @@
Docker on Ubuntu
================
The easiest way to get docker up and running natively on Ubuntu is installing
it from its official PPA::
sudo sh -c "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >>/etc/apt/sources.list"
sudo apt-get update
sudo apt-get install lxc-docker
Building docker package
~~~~~~~~~~~~~~~~~~~~~~~
The building process is shared by both developers and maintainers. If you are
a developer, the Makefile will stop with exit status 2 right before signing
the built packages.
Assuming you are working on an Ubuntu 12.04 LTS system ::
# Download a fresh copy of the docker project
git clone https://github.com/dotcloud/docker.git
cd docker
# Get building dependencies
sudo apt-get update; sudo apt-get install -y debhelper autotools-dev devscripts golang
# Make the ubuntu package
(cd packaging/ubuntu; make ubuntu)
Install the built docker package
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
sudo dpkg -i lxc-docker_*_amd64.deb; sudo apt-get install -f -y

packaging/ubuntu/Vagrantfile vendored Normal file
View File

@@ -0,0 +1,12 @@
BUILDBOT_IP = '192.168.33.32'
Vagrant::Config.run do |config|
config.vm.box = 'precise64'
config.vm.box_url = 'http://files.vagrantup.com/precise64.box'
config.vm.share_folder 'v-data', '/data/docker', "#{File.dirname(__FILE__)}/../.."
config.vm.network :hostonly,BUILDBOT_IP
# Install ubuntu packaging dependencies and create ubuntu packages
config.vm.provision :shell, :inline => 'export DEBIAN_FRONTEND=noninteractive; apt-get -qq update; apt-get install -qq -y git debhelper autotools-dev devscripts golang'
config.vm.provision :shell, :inline => "export GPG_KEY='#{ENV['GPG_KEY']}'; cd /data/docker/packaging/ubuntu; make ubuntu"
end

packaging/ubuntu/changelog Normal file
View File

@@ -0,0 +1,122 @@
lxc-docker (0.2.0-1) precise; urgency=low
- Runtime: ghost containers can be killed and waited for
- Documentation: update install instructions
- Packaging: fix Vagrantfile
- Development: automate releasing binaries and ubuntu packages
- Add a changelog
- Various bugfixes
-- dotCloud <ops@dotcloud.com> Mon, 23 Apr 2013 00:00:00 -0700
lxc-docker (0.1.8-1) precise; urgency=low
- Dynamically detect cgroup capabilities
- Issue stability warning on kernels <3.8
- 'docker push' buffers on disk instead of memory
- Fix 'docker diff' for removed files
- Fix 'docker stop' for ghost containers
- Fix handling of pidfile
- Various bugfixes and stability improvements
-- dotCloud <ops@dotcloud.com> Mon, 22 Apr 2013 00:00:00 -0700
lxc-docker (0.1.7-1) precise; urgency=low
- Container ports are available on localhost
- 'docker ps' shows allocated TCP ports
- Contributors can run 'make hack' to start a continuous integration VM
- Streamline ubuntu packaging & uploading
- Various bugfixes and stability improvements
-- dotCloud <ops@dotcloud.com> Thu, 18 Apr 2013 00:00:00 -0700
lxc-docker (0.1.6-1) precise; urgency=low
- Record the author of an image with 'docker commit -author'
-- dotCloud <ops@dotcloud.com> Wed, 17 Apr 2013 00:00:00 -0700
lxc-docker (0.1.5-1) precise; urgency=low
- Disable standalone mode
- Use a custom DNS resolver with 'docker -d -dns'
- Detect ghost containers
- Improve diagnosis of missing system capabilities
- Allow disabling memory limits at compile time
- Add debian packaging
- Documentation: installing on Arch Linux
- Documentation: running Redis on docker
- Fixed lxc 0.9 compatibility
- Automatically load aufs module
- Various bugfixes and stability improvements
-- dotCloud <ops@dotcloud.com> Wed, 17 Apr 2013 00:00:00 -0700
lxc-docker (0.1.4-1) precise; urgency=low
- Full support for TTY emulation
- Detach from a TTY session with the escape sequence `C-p C-q`
- Various bugfixes and stability improvements
- Minor UI improvements
- Automatically create our own bridge interface 'docker0'
-- dotCloud <ops@dotcloud.com> Tue, 9 Apr 2013 00:00:00 -0700
lxc-docker (0.1.3-1) precise; urgency=low
- Choose TCP frontend port with '-p :PORT'
- Layer format is versioned
- Major reliability improvements to the process manager
- Various bugfixes and stability improvements
-- dotCloud <ops@dotcloud.com> Thu, 4 Apr 2013 00:00:00 -0700
lxc-docker (0.1.2-1) precise; urgency=low
- Set container hostname with 'docker run -h'
- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]'
- Various bugfixes and stability improvements
- UI polish
- Progress bar on push/pull
- Use XZ compression by default
- Make IP allocator lazy
-- dotCloud <ops@dotcloud.com> Wed, 3 Apr 2013 00:00:00 -0700
lxc-docker (0.1.1-1) precise; urgency=low
- Display shorthand IDs for convenience
- Stabilize process management
- Layers can include a commit message
- Simplified 'docker attach'
- Fixed support for re-attaching
- Various bugfixes and stability improvements
- Auto-download at run
- Auto-login on push
- Beefed up documentation
-- dotCloud <ops@dotcloud.com> Sun, 31 Mar 2013 00:00:00 -0700
lxc-docker (0.1.0-1) precise; urgency=low
- First release
- Implement registry in order to push/pull images
- TCP port allocation
- Fix termcaps on Linux
- Add documentation
- Add Vagrant support with Vagrantfile
- Add unit tests
- Add repository/tags to ease image management
- Improve the layer implementation
-- dotCloud <ops@dotcloud.com> Sat, 23 Mar 2013 00:00:00 -0700

packaging/ubuntu/control Normal file
View File

@@ -0,0 +1,19 @@
Source: lxc-docker
Section: misc
Priority: extra
Maintainer: Daniel Mizyrycki <daniel@dotcloud.com>
Build-Depends: debhelper,autotools-dev,devscripts,golang
Standards-Version: 3.9.3
Homepage: http://github.com/dotcloud/docker
Package: lxc-docker
Architecture: linux-any
Depends: ${misc:Depends},${shlibs:Depends},lxc,bsdtar
Conflicts: docker
Description: lxc-docker is a Linux container runtime
Docker complements LXC with a high-level API which operates at the process
level. It runs unix processes with strong guarantees of isolation and
repeatability across servers.
Docker is a great building block for automating distributed systems:
large-scale web deployments, database clusters, continuous deployment systems,
private PaaS, service-oriented architectures, etc.

packaging/ubuntu/copyright Normal file
View File

@@ -0,0 +1,237 @@
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: docker
Upstream-Contact: DotCloud Inc <opensource@dotcloud.com>
Source: http://github.com/dotcloud/docker
Files: *
Copyright: 2012, DotCloud Inc <opensource@dotcloud.com>
License: Apache-2.0
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2012 DotCloud Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Files: src/github.com/kr/pty/*
Copyright: Copyright (c) 2011 Keith Rarick
License: Expat
Copyright (c) 2011 Keith Rarick
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall
be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -1,5 +0,0 @@
lxc-docker (1) precise; urgency=low
* Initial release
-- dotCloud <ops@dotcloud.com> Mon, 25 Mar 2013 05:51:12 -0700

View File

@@ -1,19 +0,0 @@
Source: lxc-docker
Section: misc
Priority: extra
Homepage: http://docker.io
Maintainer: Daniel Mizyrycki <daniel@dotcloud.com>
Build-Depends: debhelper (>= 8.0.0), pkg-config, git, golang, libsqlite3-dev
Vcs-Git: http://github.com/dotcloud/docker.git
Standards-Version: 3.9.3
Package: lxc-docker
Architecture: amd64
Depends: ${shlibs:Depends}, ${misc:Depends}, lxc, wget, bsdtar, curl, sqlite3
Conflicts: docker
Description: A process manager with superpowers
It encapsulates heterogeneous payloads in Standard Containers, and runs
them on any server with strong guarantees of isolation and repeatability.
Is is a great building block for automating distributed systems:
large-scale web deployments, database clusters, continuous deployment
systems, private PaaS, service-oriented architectures, etc.

View File

@@ -1,209 +0,0 @@
Format: http://dep.debian.net/deps/dep5
Upstream-Name: docker
Source: https://github.com/dotcloud/docker
Files: *
Copyright: 2012 DotCloud Inc (opensource@dotcloud.com)
License: Apache License Version 2.0
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2012 DotCloud Inc (opensource@dotcloud.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -5,6 +5,6 @@ stop on starting rc RUNLEVEL=[016]
respawn
script
test -f /etc/default/locale && . /etc/default/locale || true
LANG=$LANG LC_ALL=$LANG /usr/bin/docker -d
# FIXME: docker should not depend on the system having en_US.UTF-8
LC_ALL='en_US.UTF-8' /usr/bin/docker -d
end script

View File

@@ -0,0 +1,4 @@
#!/bin/sh
# Start docker
/sbin/start docker

View File

@@ -0,0 +1,4 @@
#!/bin/sh
# Stop docker
/sbin/stop docker

View File

@@ -0,0 +1,39 @@
Maintainer duty
===============
Ubuntu allows developers to distribute software through their own PPA
(Personal Package Archive) repository. This is very convenient for users:
they only need to add the PPA address, update their package database, and
install with the apt-get tool.
For now, the official lxc-docker package is hosted on Launchpad and can be
accessed by adding the following line to /etc/apt/sources.list ::
deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main
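For reference, a typical installation flow against this PPA might look like
the following (the ppa: identifier is inferred from the sources.list line
above; exact commands may vary by Ubuntu version)::
sudo add-apt-repository ppa:dotcloud/lxc-docker
sudo apt-get update
sudo apt-get install lxc-docker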
Releasing a new package
~~~~~~~~~~~~~~~~~~~~~~~
The most important file to update is packaging/ubuntu/changelog:
each new release should add a new entry at the top with the release version,
the list of changes, and the maintainer information. The content of this entry
comes from CHANGELOG.md; make sure to transcribe it and adjust the formatting
(e.g. packaging/ubuntu/changelog indents body change descriptions with 2 spaces
instead of the 1 space used in CHANGELOG.md). An illustrative entry is sketched
below.
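The stanza below only illustrates the standard Debian changelog layout; the
version number, change lines, and maintainer are made up::
lxc-docker (0.0.0-1) precise; urgency=low
  * Example change description (body lines are indented with 2 spaces)
  * Another example change description
 -- Your Name <you@example.com>  Tue, 23 Apr 2013 00:00:00 -0700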
Assuming your PPA GPG signing key is stored at /media/usbdrive/docker.key, load it
into the GPG_KEY environment variable with::
export GPG_KEY=`cat /media/usbdrive/docker.key`
Once this is done and you are ready to upload the package to the PPA, you have
two options:
* Follow packaging/ubuntu/README.ubuntu to generate the actual source packages
and upload them to the PPA
* Let vagrant do all the work for you::
( cd docker/packaging/ubuntu; vagrant up )

View File

@@ -7,6 +7,7 @@ import (
"io"
"io/ioutil"
"net/http"
"os"
"path"
"strings"
)
@@ -135,7 +136,7 @@ func (graph *Graph) getRemoteImage(stdout io.Writer, imgId string, authConfig *a
if err != nil {
return nil, nil, err
}
return img, ProgressReader(res.Body, int(res.ContentLength), stdout), nil
return img, ProgressReader(res.Body, int(res.ContentLength), stdout, "Downloading %v/%v (%v)"), nil
}
func (graph *Graph) PullImage(stdout io.Writer, imgId string, authConfig *auth.AuthConfig) error {
@@ -269,24 +270,20 @@ func (graph *Graph) PushImage(stdout io.Writer, imgOrig *Image, authConfig *auth
return fmt.Errorf("Failed to retrieve layer upload location: %s", err)
}
// FIXME: Don't do this :D. Check the S3 requirement and implement chunks of 5MB
// FIXME2: I can't stress it enough, DON'T DO THIS! very high priority
layerData2, err := Tar(path.Join(graph.Root, img.Id, "layer"), Xz)
tmp, err := ioutil.ReadAll(layerData2)
// FIXME: stream the archive directly to the registry instead of buffering it on disk. This requires either:
// a) Implementing S3's proprietary streaming logic, or
// b) Streaming directly to the registry instead of S3.
// I prefer option b because it doesn't lock us into a proprietary cloud service.
tmpLayer, err := graph.TempLayerArchive(img.Id, Xz, stdout)
if err != nil {
return err
}
layerLength := len(tmp)
layerData, err := Tar(path.Join(graph.Root, img.Id, "layer"), Xz)
if err != nil {
return fmt.Errorf("Failed to generate layer archive: %s", err)
}
req3, err := http.NewRequest("PUT", url.String(), ProgressReader(layerData.(io.ReadCloser), layerLength, stdout))
defer os.Remove(tmpLayer.Name())
req3, err := http.NewRequest("PUT", url.String(), ProgressReader(tmpLayer, int(tmpLayer.Size), stdout, "Uploading %v/%v (%v)"))
if err != nil {
return err
}
req3.ContentLength = int64(layerLength)
req3.ContentLength = int64(tmpLayer.Size)
req3.TransferEncoding = []string{"none"}
res3, err := client.Do(req3)

View File

@@ -6,6 +6,7 @@ import (
"github.com/dotcloud/docker/auth"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
@@ -14,6 +15,11 @@ import (
"time"
)
type Capabilities struct {
MemoryLimit bool
SwapLimit bool
}
type Runtime struct {
root string
repository string
@@ -23,6 +29,8 @@ type Runtime struct {
repositories *TagStore
authConfig *auth.AuthConfig
idIndex *TruncIndex
capabilities *Capabilities
kernelVersion *KernelVersionInfo
}
var sysInitPath string
@@ -176,12 +184,6 @@ func (runtime *Runtime) Register(container *Container) error {
}
}
// If the container is not running or just has been flagged not running
// then close the wait lock chan (will be reset upon start)
if !container.State.Running {
close(container.waitLock)
}
// Even if not running, we init the lock (prevents races in start/stop/kill)
container.State.initLock()
@@ -199,6 +201,15 @@ func (runtime *Runtime) Register(container *Container) error {
// done
runtime.containers.PushBack(container)
runtime.idIndex.Add(container.Id)
// If the container is not running or just has been flagged not running
// then close the wait lock chan (will be reset upon start)
if !container.State.Running {
close(container.waitLock)
} else {
container.allocateNetwork()
go container.monitor()
}
return nil
}
@@ -217,7 +228,7 @@ func (runtime *Runtime) Destroy(container *Container) error {
return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.Id)
}
if err := container.Stop(); err != nil {
if err := container.Stop(10); err != nil {
return err
}
if mounted, err := container.Mounted(); err != nil {
@@ -238,7 +249,7 @@ func (runtime *Runtime) Destroy(container *Container) error {
// Commit creates a new filesystem image from the current state of a container.
// The image can optionally be tagged into a repository
func (runtime *Runtime) Commit(id, repository, tag, comment string) (*Image, error) {
func (runtime *Runtime) Commit(id, repository, tag, comment, author string) (*Image, error) {
container := runtime.Get(id)
if container == nil {
return nil, fmt.Errorf("No such container: %s", id)
@@ -250,7 +261,7 @@ func (runtime *Runtime) Commit(id, repository, tag, comment string) (*Image, err
return nil, err
}
// Create a new image from the container's base layers + a new layer from container changes
img, err := runtime.graph.Create(rwTar, container, comment)
img, err := runtime.graph.Create(rwTar, container, comment, author)
if err != nil {
return nil, err
}
@@ -282,7 +293,37 @@ func (runtime *Runtime) restore() error {
// FIXME: harmonize with NewGraph()
func NewRuntime() (*Runtime, error) {
return NewRuntimeFromDirectory("/var/lib/docker")
runtime, err := NewRuntimeFromDirectory("/var/lib/docker")
if err != nil {
return nil, err
}
if k, err := GetKernelVersion(); err != nil {
log.Printf("WARNING: %s\n", err)
} else {
runtime.kernelVersion = k
if CompareKernelVersion(k, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
}
}
if cgroupMemoryMountpoint, err := FindCgroupMountpoint("memory"); err != nil {
log.Printf("WARNING: %s\n", err)
} else {
_, err1 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.limit_in_bytes"))
_, err2 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.soft_limit_in_bytes"))
runtime.capabilities.MemoryLimit = err1 == nil && err2 == nil
if !runtime.capabilities.MemoryLimit {
log.Printf("WARNING: Your kernel does not support cgroup memory limit.")
}
_, err = ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes"))
runtime.capabilities.SwapLimit = err == nil
if !runtime.capabilities.SwapLimit {
log.Printf("WARNING: Your kernel does not support cgroup swap limit.")
}
}
return runtime, nil
}
func NewRuntimeFromDirectory(root string) (*Runtime, error) {
@@ -321,6 +362,7 @@ func NewRuntimeFromDirectory(root string) (*Runtime, error) {
repositories: repositories,
authConfig: authConfig,
idIndex: NewTruncIndex(),
capabilities: &Capabilities{},
}
if err := runtime.restore(); err != nil {

View File

@@ -1,9 +1,11 @@
package docker
import (
"fmt"
"github.com/dotcloud/docker/rcli"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"os/user"
@@ -46,8 +48,6 @@ func layerArchive(tarfile string) (io.Reader, error) {
}
func init() {
NO_MEMORY_LIMIT = os.Getenv("NO_MEMORY_LIMIT") == "1"
// Hack to run sys init during unit testing
if SelfPath() == "/sbin/init" {
SysInit()
@@ -254,6 +254,47 @@ func TestGet(t *testing.T) {
}
// Run a container with a TCP port allocated, and test that it can receive connections on localhost
func TestAllocatePortLocalhost(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
container, err := runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"sh", "-c", "echo well hello there | nc -l -p 5555"},
PortSpecs: []string{"5555"},
},
)
if err != nil {
t.Fatal(err)
}
if err := container.Start(); err != nil {
t.Fatal(err)
}
defer container.Kill()
time.Sleep(600 * time.Millisecond) // Wait for the container to run
conn, err := net.Dial("tcp",
fmt.Sprintf(
"localhost:%s", container.NetworkSettings.PortMapping["5555"],
),
)
if err != nil {
t.Fatal(err)
}
defer conn.Close()
output, err := ioutil.ReadAll(conn)
if err != nil {
t.Fatal(err)
}
if string(output) != "well hello there\n" {
t.Fatalf("Received wrong output from network connection: should be '%s', not '%s'",
"well hello there\n",
string(output),
)
}
}
func TestRestore(t *testing.T) {
root, err := ioutil.TempDir("", "docker-test")

View File

@@ -12,6 +12,7 @@ import (
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
@@ -69,23 +70,30 @@ type progressReader struct {
readTotal int // Expected stream length (bytes)
readProgress int // How much has been read so far (bytes)
lastUpdate int // How many bytes had been read at the last update
template string // Template to print. Default "%v/%v (%v)"
}
func (r *progressReader) Read(p []byte) (n int, err error) {
read, err := io.ReadCloser(r.reader).Read(p)
r.readProgress += read
// Only update progress for every 1% read
updateEvery := int(0.01 * float64(r.readTotal))
if r.readProgress-r.lastUpdate > updateEvery || r.readProgress == r.readTotal {
fmt.Fprintf(r.output, "%d/%d (%.0f%%)\r",
r.readProgress,
r.readTotal,
float64(r.readProgress)/float64(r.readTotal)*100)
updateEvery := 4096
if r.readTotal > 0 {
// Only update progress for every 1% read
if increment := int(0.01 * float64(r.readTotal)); increment > updateEvery {
updateEvery = increment
}
}
if r.readProgress-r.lastUpdate > updateEvery || err != nil {
if r.readTotal > 0 {
fmt.Fprintf(r.output, r.template+"\r", r.readProgress, r.readTotal, fmt.Sprintf("%.0f%%", float64(r.readProgress)/float64(r.readTotal)*100))
} else {
fmt.Fprintf(r.output, r.template+"\r", r.readProgress, "?", "n/a")
}
r.lastUpdate = r.readProgress
}
// Send newline when complete
if err == io.EOF {
if err != nil {
fmt.Fprintf(r.output, "\n")
}
@@ -94,8 +102,11 @@ func (r *progressReader) Read(p []byte) (n int, err error) {
func (r *progressReader) Close() error {
return io.ReadCloser(r.reader).Close()
}
func ProgressReader(r io.ReadCloser, size int, output io.Writer) *progressReader {
return &progressReader{r, output, size, 0, 0}
func ProgressReader(r io.ReadCloser, size int, output io.Writer, template string) *progressReader {
if template == "" {
template = "%v/%v (%v)"
}
return &progressReader{r, output, size, 0, 0, template}
}
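// Illustrative only, not part of this change: a minimal sketch of how the new
// ProgressReader signature can wrap any io.ReadCloser. The function name and
// template string below are hypothetical.
func copyWithProgress(dst io.Writer, src io.ReadCloser, size int, out io.Writer) error {
	// Wrap the reader; an empty template falls back to the default "%v/%v (%v)".
	pr := ProgressReader(src, size, out, "Copying %v/%v (%v)")
	defer pr.Close()
	_, err := io.Copy(dst, pr)
	return err
}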
// HumanDuration returns a human-readable approximation of a duration
@@ -384,3 +395,59 @@ func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error)
}
return written, err
}
type KernelVersionInfo struct {
Kernel int
Major int
Minor int
Flavor string
}
// FIXME: this doesn't build on Darwin
func GetKernelVersion() (*KernelVersionInfo, error) {
return getKernelVersion()
}
func (k *KernelVersionInfo) String() string {
return fmt.Sprintf("%d.%d.%d-%s", k.Kernel, k.Major, k.Minor, k.Flavor)
}
// Compare two KernelVersionInfo structs.
// Returns -1 if a < b, 0 if a == b, 1 if a > b
func CompareKernelVersion(a, b *KernelVersionInfo) int {
if a.Kernel < b.Kernel {
return -1
} else if a.Kernel > b.Kernel {
return 1
}
if a.Major < b.Major {
return -1
} else if a.Major > b.Major {
return 1
}
if a.Minor < b.Minor {
return -1
} else if a.Minor > b.Minor {
return 1
}
return 0
}
func FindCgroupMountpoint(cgroupType string) (string, error) {
output, err := exec.Command("mount").CombinedOutput()
if err != nil {
return "", err
}
reg := regexp.MustCompile(`^.* on (.*) type cgroup \(.*` + cgroupType + `[,\)]`)
for _, line := range strings.Split(string(output), "\n") {
r := reg.FindStringSubmatch(line)
if len(r) == 2 {
return r[1], nil
}
}
return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType)
}
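// Illustrative only, not part of this change: the kind of mount(8) output
// line the regexp above is written against. The path and mount options are
// typical examples, not taken from this repository.
func exampleFindCgroupMountpoint() {
	line := "cgroup on /sys/fs/cgroup/memory type cgroup (rw,relatime,memory)"
	reg := regexp.MustCompile(`^.* on (.*) type cgroup \(.*memory[,\)]`)
	if r := reg.FindStringSubmatch(line); len(r) == 2 {
		fmt.Println(r[1]) // prints "/sys/fs/cgroup/memory"
	}
}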

View File

@@ -228,3 +228,36 @@ func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult strin
t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult)
}
}
func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) {
if r := CompareKernelVersion(a, b); r != result {
t.Fatalf("Unepected kernel version comparaison result. Found %d, expected %d", r, result)
}
}
func TestCompareKernelVersion(t *testing.T) {
assertKernelVersion(t,
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
0)
assertKernelVersion(t,
&KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0},
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
-1)
assertKernelVersion(t,
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
&KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0},
1)
assertKernelVersion(t,
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "0"},
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "16"},
0)
assertKernelVersion(t,
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 5},
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
1)
assertKernelVersion(t,
&KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20, Flavor: "25"},
&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "0"},
-1)
}