Compare commits


146 Commits

Author SHA1 Message Date
Solomon Hykes
57e2126a02 Bumped version to 0.1.0 2013-03-23 17:48:18 -07:00
Solomon Hykes
5130d3d6ec Merge branch 'graph' 2013-03-23 17:47:33 -07:00
Solomon Hykes
587debf92e Merge branch 'graph' of http://github.com/dotcloud/docker into graph 2013-03-23 17:04:37 -07:00
Solomon Hykes
d301c7b98c 'docker images' doesn't show all anonymous images by default - only anonymous heads 2013-03-23 17:03:30 -07:00
Solomon Hykes
f43fbda2a4 No more dependency on sqlite 2013-03-23 16:17:01 -07:00
Solomon Hykes
301a8afff5 Properly cleanup iptables rules inserted in OUTPUT (introduced in 3c6b8bb888) 2013-03-22 22:31:20 -07:00
Solomon Hykes
690cae0ef4 Merge pull request #133 from shawnsi/master
Vagrant Provision Touch Up
2013-03-22 15:11:52 -07:00
Solomon Hykes
936bd87a52 Merge pull request #124 from donspaulding/master
Fix typos in the README
2013-03-22 15:10:13 -07:00
Guillaume J. Charmes
dc2d930520 Remove the json alterations when decoding 2013-03-22 14:27:10 -07:00
Guillaume J. Charmes
966cddf26b Add some verbosity to the push/pull 2013-03-22 13:21:44 -07:00
Guillaume J. Charmes
6e35f28352 Merge branch 'graph' of github.com:dotcloud/docker into graph 2013-03-22 13:10:31 -07:00
Solomon Hykes
0146c80c40 An image embeds the configuration of its parent container ('ContainerConfig') 2013-03-23 14:48:16 -07:00
Solomon Hykes
b809dc4271 Merge branch 'graph' of ssh://github.com/dotcloud/docker into graph 2013-03-23 14:18:41 -07:00
Solomon Hykes
f37c432bd5 Fixed 'docker inspect' to exit silently when an image doesn't exist 2013-03-23 14:18:35 -07:00
Solomon Hykes
d9471bee3d Merge branch 'graph' of ssh://github.com/dotcloud/docker into graph 2013-03-23 12:39:36 -07:00
Solomon Hykes
6ce64e8458 Moved image name into config. runtime.Create() now receives a single Config parameter 2013-03-23 12:39:09 -07:00
Solomon Hykes
031f91df1a runtime.Create receives an image name + Config. The Config includes all required runtime information: command, environment, ports etc. 2013-03-23 12:16:58 -07:00
Guillaume J. Charmes
89763bc8af Remove the lookup before pushing 2013-03-22 13:10:17 -07:00
Guillaume J. Charmes
6e507b9460 Add a Debugf() helper and a -D (debug) flag to docker 2013-03-22 11:44:12 -07:00
Guillaume J. Charmes
c57727fd65 Merge branch 'graph' of github.com:dotcloud/docker into graph 2013-03-22 11:18:33 -07:00
Guillaume J. Charmes
a2e5333a93 Make sure the remote repository exists prior to push 2013-03-22 10:42:13 -07:00
Guillaume J. Charmes
5e6355d182 Fix the lookup method 2013-03-22 10:22:22 -07:00
Joffrey F
e4886a9e33 Merge pull request #134 from shawnsi/98-natfix
Fixing Issue #98: Adding DOCKER to output chain during iptables setup
2013-03-22 09:35:07 -07:00
Shawn Siefkas
3c6b8bb888 Fixing Issue #98: Adding DOCKER to output chain during iptables setup 2013-03-22 11:28:15 -05:00
Shawn Siefkas
cff01ec5ec Redirecting docker daemon stdout/stderr to /var/log/dockerd 2013-03-22 10:30:47 -05:00
creack
0c4b639083 Update makefile gotest 2013-03-22 08:04:53 -07:00
creack
d1c8eabc63 Fix/Improve push/pull registry 2013-03-22 07:56:44 -07:00
creack
c000a5ed75 Remove the possibility to choose the remote name on push 2013-03-22 07:10:52 -07:00
Shawn Siefkas
9542876d2f Waiting to start docker until the fresh binaries have been copied in 2013-03-22 08:48:50 -05:00
creack
899613f788 Merge branch 'graph' of github.com:dotcloud/docker into graph 2013-03-22 06:39:08 -07:00
Solomon Hykes
829eeb07f8 'docker run' with no argument no longer hardcodes a default image and command 2013-03-22 20:55:17 -07:00
Solomon Hykes
841c7ac0f9 Deprecated 'docker run -a'. Containers are run in the foreground by default. '-d' enables detached mode 2013-03-22 20:46:14 -07:00
Solomon Hykes
34fbaa5f6d 'docker run -e': set environment variables in a container 2013-03-22 20:36:34 -07:00
Solomon Hykes
9b5f0fac81 Fix 'docker import' to accept urls without explicit http:// scheme 2013-03-22 19:47:32 -07:00
creack
1ed78ee160 Improve (drastically) the push 2013-03-22 06:38:54 -07:00
creack
e726bdcce2 Fix the rootPath for auth 2013-03-22 05:52:13 -07:00
creack
ab3a57d01b Merge branch 'graph' of github.com:dotcloud/docker into graph 2013-03-22 04:44:46 -07:00
Solomon Hykes
bc51d961cd Merge branch 'graph' of ssh://github.com/dotcloud/docker into graph 2013-03-22 19:22:24 -07:00
Solomon Hykes
12049f956a 'docker {history,ps,images}': show human-friendly image names when applicable 2013-03-22 19:22:06 -07:00
Solomon Hykes
7952e6befe Merge branch 'master' into graph 2013-03-22 18:30:46 -07:00
Solomon Hykes
72e16f6d96 Merge branch 'graph' of ssh://github.com/dotcloud/docker into graph 2013-03-22 18:27:32 -07:00
Solomon Hykes
bf7602bc09 'docker tag': assign a repository+tag to an image 2013-03-22 18:27:18 -07:00
Solomon Hykes
520af226c0 Merge branch 'graph' of ssh://github.com/dotcloud/docker into graph 2013-03-22 17:52:27 -07:00
Solomon Hykes
542c66997f 'docker inspect' can lookup image by repository and tag 2013-03-22 17:52:19 -07:00
Solomon Hykes
f8ebeaae10 Removed debug command 'docker mount' 2013-03-22 17:44:12 -07:00
Solomon Hykes
56752158af Merge branch 'graph' of ssh://github.com/dotcloud/docker into graph 2013-03-22 17:40:32 -07:00
Solomon Hykes
09b27f9e8d Fancier output for 'docker history' 2013-03-22 17:22:32 -07:00
creack
77549ad4f6 Improve the error management with the registry communication 2013-03-22 04:44:07 -07:00
creack
fc0eac37e4 Put back the "official" repo 2013-03-22 04:37:18 -07:00
creack
db8f2a1a9d Merge branch 'graph' of github.com:dotcloud/docker into graph 2013-03-22 04:35:19 -07:00
creack
08cb430281 Move the debian makefile to avoid confusions 2013-03-22 04:34:46 -07:00
shin-
9fa3d891c6 Merge branch 'master' of github.com:dotcloud/docker 2013-03-22 04:25:37 -07:00
shin-
b00ff47963 Fixing newlines in attached mode 2013-03-22 04:24:03 -07:00
creack
e02f7912bc Enforce login for push/pull 2013-03-22 03:43:57 -07:00
creack
e4f9a0dca0 Update the help with push/pull 2013-03-22 03:24:37 -07:00
creack
3870ebee6d Add content type to Push 2013-03-22 03:22:36 -07:00
creack
062ebff098 Merge branch 'graph' of github.com:dotcloud/docker into graph 2013-03-22 03:10:30 -07:00
creack
0eed4b4386 Add some verbosity to the push/pull features 2013-03-22 03:10:09 -07:00
creack
4307b7dd8e Add authentification to all registry call 2013-03-22 02:57:28 -07:00
creack
c72ff318d3 Integrate Auth in runtime and make the config file relative to runtime root 2013-03-22 02:19:39 -07:00
creack
5e561a9d52 Merge branch 'graph' of github.com:dotcloud/docker into graph 2013-03-22 01:27:16 -07:00
Solomon Hykes
640026ec59 Looking up a tag by repository name will default to REPOSITORY:latest. The empty tag '' is no longer allowed. 2013-03-22 16:07:13 -07:00
creack
11c4294846 Handle push/pull of repositories 2013-03-22 01:25:27 -07:00
Solomon Hykes
1850e8d49c Merge pull request #128 from cespare/markdown-fixes2
Fix numbering in README markdown.
2013-03-21 22:46:22 -07:00
Solomon Hykes
afb4a36ffa Merge pull request #130 from dotcloud/113_vagrant-compat
113 vagrant compat
2013-03-21 22:44:38 -07:00
Daniel Mizyrycki
45df6f7801 vagrant; issue #113: normalize whitespaces 2013-03-21 22:26:18 -07:00
Daniel Mizyrycki
6295a02275 vagrant; issue #113: Make Vagrantfile backward compatible with versions < 1.1 2013-03-21 22:16:52 -07:00
dhrp
13e597a5ad create README.md at this place for preview. 2013-03-21 21:47:14 -07:00
Caleb Spare
d515e2b06c Fix numbering in README markdown. 2013-03-21 21:46:00 -07:00
Solomon Hykes
41c664cacf Merge pull request #126 from cespare/markdown-fixes
Markdown fixes in the readme.
2013-03-21 18:24:46 -07:00
Caleb Spare
7566006d0d Markdown fixes in the readme. 2013-03-21 18:20:54 -07:00
creack
d8fa52b7b5 Comply the tests with golang TIP 2013-03-21 10:31:02 -07:00
creack
f246cc9cdd Apply the new WalkHistory prototype to merge 2013-03-21 10:12:05 -07:00
creack
da266e6c7b Factorize the pull/push commands and create a registry.go 2013-03-21 10:10:14 -07:00
Solomon Hykes
f50dcbe404 Image.ParentCommand and Image.ParentConatiner should be stored 2013-03-21 22:45:22 -07:00
Solomon Hykes
cdd62522b6 Merge branch 'graph' of ssh://github.com/dotcloud/docker into graph 2013-03-21 22:21:00 -07:00
Solomon Hykes
1ad69ad415 'docker history': show the history of an image 2013-03-21 21:42:18 -07:00
Solomon Hykes
05ae69a6eb 'docker commit' records parent container id and command, in addition to parent image 2013-03-21 21:13:27 -07:00
Solomon Hykes
8396798eba 'docker commit' can optionally tag the new image into a repository 2013-03-21 20:07:37 -07:00
Solomon Hykes
49a78929c6 Repositories and tags can't have ':' in their name (to allow parsing the REPO:TAG notation) 2013-03-21 20:06:20 -07:00
Solomon Hykes
379d449c44 'docker run' can reference an image by REPOSITORY:TAG 2013-03-21 19:01:55 -07:00
Solomon Hykes
d0c776528b Fix a bug which caused repository metadata to be cleared at startup 2013-03-21 19:00:43 -07:00
Solomon Hykes
4af8b711c0 Fixed output quirks in 'docker images' 2013-03-21 18:59:12 -07:00
Solomon Hykes
ef711962d5 Folded graph/ back into main package 2013-03-21 17:47:23 -07:00
Solomon Hykes
44faa07b6c First integration of runtime with repositories & tags 2013-03-21 17:35:49 -07:00
Solomon Hykes
680f40c37e graph.RepoStore: first draft of a Repository/Tag on top of the graph 2013-03-21 12:18:47 -07:00
shin-
3aefed2dc2 When lxcbr0 has several associated IPs, default to first one found 2013-03-21 09:19:22 -07:00
Don Spaulding
8ff60ddef4 Typos in the README 2013-03-21 08:39:52 -05:00
creack
eef9659c95 merge graph in graph 2013-03-21 06:35:57 -07:00
creack
42cf74d56b POC: push/pull are (kinda) working 2013-03-21 06:33:29 -07:00
creack
04ba4348de Merge branch 'graph' of github.com:dotcloud/docker into graph 2013-03-21 04:32:04 -07:00
creack
864a8d9aca Merge branch 'graph' of github.com:dotcloud/docker into graph 2013-03-21 03:54:24 -07:00
creack
edcfd687ef POC of push/pull for images, pull works, push do push but without the layer 2013-03-21 03:53:27 -07:00
creack
3e8d1dfb69 Enforce model for the json image format 2013-03-21 03:52:58 -07:00
Solomon Hykes
b6b5e5cec1 Merged master 2013-03-21 02:13:21 -07:00
Solomon Hykes
f783759928 Docker currently doesn't support 32-bit hosts. Let's make that clear by failing right away with an informative message 2013-03-21 02:04:10 -07:00
Solomon Hykes
c37d7aad36 Merge branch 'master' into graph 2013-03-21 01:43:32 -07:00
Solomon Hykes
d65983f386 No need for a Container.networkManager field: it can be accessed via Container.runtime 2013-03-21 01:43:03 -07:00
Solomon Hykes
1ed13f65fe Merge branch 'master' into graph 2013-03-21 01:38:44 -07:00
Solomon Hykes
377cebe36f Renamed docker*.go to runtime*.go 2013-03-21 01:24:54 -07:00
Solomon Hykes
2f781f2128 Removed 'sparse export' mode, it is deprecated by the new diff-based transfer protocol 2013-03-21 01:24:12 -07:00
Solomon Hykes
e627a0da1e Renamed 'docker tar' to 'docker export' for symmetry with 'docker import' 2013-03-21 01:23:00 -07:00
Solomon Hykes
623e91e2e3 Moved Go() to the main package... And got rid of the useless docker/future package 2013-03-21 01:13:55 -07:00
Solomon Hykes
deb603aaf4 Removed unused utility future.Pv() 2013-03-21 01:10:44 -07:00
Solomon Hykes
0208b6accd moved GenerateId() to the graph package 2013-03-21 01:07:07 -07:00
Solomon Hykes
d7c5d060c4 Moved Download() and progressReader{} to the main package 2013-03-21 00:54:54 -07:00
Solomon Hykes
299d0b2720 Moved HumanDuration() to the main package 2013-03-21 00:52:43 -07:00
Solomon Hykes
b8547f31e4 Renamed Docker{} to Runtime{} for clarity 2013-03-21 00:41:15 -07:00
Solomon Hykes
7c57a4cfc0 Simplified the core container API, ported it to the new graph. Some features are missing eg. image 'paths' and tags 2013-03-21 00:25:00 -07:00
Solomon Hykes
84e8c4aa1d Fixed a bug in graph.Graph.Get() 2013-03-21 00:21:32 -07:00
Solomon Hykes
89a140fb75 Removed redundant mount_test.go (graph_test.go already tests the mount ability) 2013-03-21 00:21:03 -07:00
Guillaume J. Charmes
3e9877a30f Merge pull request #122 from srid/patch-1
remove ! from command line
2013-03-20 23:26:50 -07:00
Sridhar Ratnakumar
3bb176d8ae remove ! from command line
bash does not like it

```
$ JOB=$(docker run base /bin/sh -c "while true; do echo Hello world!; sleep 1; done")
bash: !: event not found
$
```
2013-03-20 22:53:42 -07:00
Solomon Hykes
34023558f5 Pruned more semi-useless commands: 'docker cat', 'docker cp', 'docker ls', 'docker write'. Removed outdated commands from help message 2013-03-20 22:48:52 -07:00
Solomon Hykes
9d82bab041 Removed anal warning from 'go vet' 2013-03-20 22:42:50 -07:00
Solomon Hykes
3eff62394b Removed dependency on the fake package in graph unit tests 2013-03-20 22:42:08 -07:00
Solomon Hykes
4d9c324495 Removed extra import 2013-03-20 22:41:31 -07:00
Solomon Hykes
75c866d6a3 Unmount() and Mounted(): utility functions to unmount a mountpoint and check if it's mounted, respectively 2013-03-20 22:41:03 -07:00
Solomon Hykes
6f6eaca861 Removed mount code from container. It belongs in graph 2013-03-20 22:16:02 -07:00
Solomon Hykes
ea258c4492 docker/fs is deprecated by docker/graph 2013-03-20 22:15:09 -07:00
Solomon Hykes
240333277a Removed Image.Unmount(). It belongs in container code 2013-03-20 22:13:57 -07:00
Solomon Hykes
9e8278134d Image.Mount(): create rw and rootfs directory if they don't exist 2013-03-20 22:13:28 -07:00
Solomon Hykes
c8db980add Image.Changes(): list all changes between an image and a rw directory 2013-03-20 22:12:38 -07:00
Guillaume J. Charmes
3f63e3426e Merge pull request #121 from ezbercih/patch-1
Fix issue #120, initialize TCPAddr w/ field names
2013-03-20 20:25:16 -07:00
Solomon Hykes
31296cc3f7 Removed deprecated or useless commands (cp, layers, reset, mirror, debug, web) 2013-03-20 20:21:59 -07:00
Solomon Hykes
33d2905cde Merge pull request #115 from termie/readme_update
update the dependencies for the dev environment
2013-03-20 15:20:18 -07:00
termie
2048354c8b update the dev requirements in readme
a little pedantic, perhaps, but on a fresh precise image from vagrant I
still needed these two packages to run the commands following it
2013-03-20 20:17:46 +00:00
Solomon Hykes
98542d4497 Merge branch 'master' into graph 2013-03-20 09:41:37 -07:00
shin-
6d580247c2 Removed 'fake' package. 2013-03-20 07:49:38 -07:00
creack
ab99e9252d Complete pull request #121, init TCPAddr with named field 2013-03-20 06:02:25 -07:00
ezbercih
fac32cda5a Fix issue #120, initialize TCPAddr w/ field names
Current Go tip (+74e65f07a0c8) and likely Go 1.1 does not build docker since net.TCPAddr struct has an additional field now for IPv6:

type TCPAddr struct {
    IP   IP
    Port int
    Zone string // IPv6 scoped addressing zone
}

Initializing the struct with named fields resolves this problem.
2013-03-21 00:11:16 -03:00
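
For context on the fix described above: a minimal, self-contained sketch (not taken from the pull request itself) of initializing `net.TCPAddr` with named fields, so the code keeps compiling once the struct gains the `Zone` field in Go 1.1. The address and port values are illustrative only.

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Named fields keep compiling even after net.TCPAddr gains the Zone
	// field; positional initialization (net.TCPAddr{ip, port}) does not.
	addr := &net.TCPAddr{
		IP:   net.ParseIP("127.0.0.1"), // illustrative address
		Port: 4243,                     // illustrative port
	}
	fmt.Println(addr.String()) // prints 127.0.0.1:4243
}
```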
Solomon Hykes
a3174fd874 Merge pull request #114 from jpetazzo/whiteboard20130319
Images & repositories, what they mean, and the protocol to push/pull them
2013-03-20 00:18:43 -07:00
Solomon Hykes
ddf4c79977 Merge pull request #112 from srid/devenv
instructions to compile docker
2013-03-20 00:08:54 -07:00
Jérôme Petazzoni
acd51ecea8 add pseudo-spec of images, repositories, push, and pull operations 2013-03-19 20:35:14 -07:00
Sridhar Ratnakumar
4389574aff instructions to compile docker 2013-03-19 20:17:32 -07:00
Solomon Hykes
ff2ae90764 Merge pull request #110 from synack/master
Add linux-image-extra instructions to README
2013-03-19 18:46:28 -07:00
Jeremy Grosser
2508b5cef9 Update README.md 2013-03-19 18:45:11 -07:00
Solomon Hykes
b1acd0a7b0 Merge pull request #106 from kencochrane/users
change registry address to https from http
2013-03-19 17:42:29 -07:00
Ken Cochrane
8be58d3a7f change registry address to https from http 2013-03-19 16:03:17 -07:00
Solomon Hykes
c5051ea0ea Merge pull request #102 from fkautz/master
Some minor cleanup of cleanup in Makefile
2013-03-19 10:07:21 -07:00
Solomon Hykes
3c1db4ca43 Ported test for image deletion 2013-03-18 17:57:18 -07:00
Solomon Hykes
33f6a0aaf5 docker/graph: a store for filesystem images and the graph of their relationships 2013-03-18 00:15:35 -07:00
Frederick F. Kautz IV
6316b99556 Adding clean to beginning of all for cleaner builds. 2013-03-17 11:59:59 -07:00
Frederick F. Kautz IV
bb9ce6b287 Adding bin to clean 2013-03-17 11:54:05 -07:00
Frederick F. Kautz IV
a3ca3e9218 Makefile cleanup renamed to clean to match standard conventions. 2013-03-17 11:53:37 -07:00
jpetazzo
71c997dc83 Add contrib/ directory, README, and script to create a basic busybox image 2013-03-14 03:13:00 +00:00
46 changed files with 2827 additions and 2828 deletions

3
.gitignore vendored
View File

@@ -1,4 +1,5 @@
.vagrant
bin
docker/docker
.*.swp
a.out
@@ -6,3 +7,5 @@ a.out
build_src
command-line-arguments.test
.flymake*
docker.test
auth/auth.test

130
README.md
View File

@@ -1,9 +1,9 @@
Docker: the Linux container runtime
===================================
Docker complements LXC with a high-level API with operates at the process level. It runs unix processes with strong guarantees of isolation and repeatability across servers.
Docker complements LXC with a high-level API which operates at the process level. It runs unix processes with strong guarantees of isolation and repeatability across servers.
Is is a great building block for automating distributed systems: large-scale web deployments, database clusters, continuous deployment systems, private PaaS, service-oriented architectures, etc.
Docker is a great building block for automating distributed systems: large-scale web deployments, database clusters, continuous deployment systems, private PaaS, service-oriented architectures, etc.
<img src="http://bricks.argz.com/bricksfiles/lego/07000/7823/012.jpg"/>
@@ -11,7 +11,7 @@ Is is a great building block for automating distributed systems: large-scale web
* *Any server*: docker can run on any x64 machine with a modern linux kernel - whether it's a laptop, a bare metal server or a VM. This makes it perfect for multi-cloud deployments.
* *Isolation*: docker isolates processes from each other and from the underlying host, using lightweight containers.
* *Isolation*: docker isolates processes from each other and from the underlying host, using lightweight containers.
* *Repeatability*: because containers are isolated in their own filesystem, they behave the same regardless of where, when, and alongside what they run.
@@ -27,7 +27,7 @@ Notable features
* Copy-on-write: root filesystems are created using copy-on-write, which makes deployment extremeley fast, memory-cheap and disk-cheap.
* Logging: the standard streams (stdout/stderr/stdin) of each process container is collected and logged for real-time or batch retrieval.
* Logging: the standard streams (stdout/stderr/stdin) of each process container are collected and logged for real-time or batch retrieval.
* Change management: changes to a container's filesystem can be committed into a new image and re-used to create more containers. No templating or manual configuration required.
@@ -58,25 +58,28 @@ Installing on Ubuntu 12.04 and 12.10
1. Install dependencies:
```bash
sudo apt-get install lxc wget bsdtar curl
```
```bash
sudo apt-get install lxc wget bsdtar curl
sudo apt-get install linux-image-extra-`uname -r`
```
The `linux-image-extra` package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
2. Install the latest docker binary:
```bash
wget http://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-master.tgz
tar -xf docker-master.tgz
```
```bash
wget http://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-master.tgz
tar -xf docker-master.tgz
```
3. Run your first container!
```bash
cd docker-master
sudo ./docker run -a -i -t base /bin/bash
```
```bash
cd docker-master
sudo ./docker run -i -t base /bin/bash
```
Consider adding docker to your `PATH` for simplicity.
Consider adding docker to your `PATH` for simplicity.
Installing on other Linux distributions
---------------------------------------
@@ -96,12 +99,12 @@ with VirtualBox as well as on Amazon EC2. Vagrant 1.1 is required for
EC2, but deploying is as simple as:
```bash
$ export AWS_ACCESS_KEY_ID=xxx \
AWS_SECRET_ACCESS_KEY=xxx \
AWS_KEYPAIR_NAME=xxx \
AWS_SSH_PRIVKEY=xxx
$ vagrant plugin install vagrant-aws
$ vagrant up --provider=aws
$ export AWS_ACCESS_KEY_ID=xxx \
AWS_SECRET_ACCESS_KEY=xxx \
AWS_KEYPAIR_NAME=xxx \
AWS_SSH_PRIVKEY=xxx
$ vagrant plugin install vagrant-aws
$ vagrant up --provider=aws
```
The environment variables are:
@@ -112,11 +115,11 @@ The environment variables are:
* `AWS_SSH_PRIVKEY` - The path to the private key for the named keypair
For VirtualBox, you can simply ignore setting any of the environment
variables and omit the ``provider`` flag. VirtualBox is still supported with
Vagrant <= 1.1:
variables and omit the `provider` flag. VirtualBox is still supported with
Vagrant &lt;= 1.1:
```bash
$ vagrant up
$ vagrant up
```
@@ -128,12 +131,12 @@ Running an interactive shell
----------------------------
```bash
# Download a base image
docker import base
# Download a base image
docker import base
# Run an interactive shell in the base image,
# allocate a tty, attach stdin and stdout
docker run -a -i -t base /bin/bash
# Run an interactive shell in the base image,
# allocate a tty, attach stdin and stdout
docker run -i -t base /bin/bash
```
@@ -141,17 +144,17 @@ Starting a long-running worker process
--------------------------------------
```bash
# Run docker in daemon mode
(docker -d || echo "Docker daemon already running") &
# Run docker in daemon mode
(docker -d || echo "Docker daemon already running") &
# Start a very useful long-running process
JOB=$(docker run base /bin/sh -c "while true; do echo Hello world!; sleep 1; done")
# Start a very useful long-running process
JOB=$(docker run -d base /bin/sh -c "while true; do echo Hello world; sleep 1; done")
# Collect the output of the job so far
docker logs $JOB
# Collect the output of the job so far
docker logs $JOB
# Kill the job
docker kill $JOB
# Kill the job
docker kill $JOB
```
@@ -159,7 +162,7 @@ Listing all running containers
------------------------------
```bash
docker ps
docker ps
```
@@ -167,17 +170,17 @@ Expose a service on a TCP port
------------------------------
```bash
# Expose port 4444 of this container, and tell netcat to listen on it
JOB=$(docker run -p 4444 base /bin/nc -l -p 4444)
# Expose port 4444 of this container, and tell netcat to listen on it
JOB=$(docker run -d -p 4444 base /bin/nc -l -p 4444)
# Which public port is NATed to my container?
PORT=$(docker port $JOB 4444)
# Which public port is NATed to my container?
PORT=$(docker port $JOB 4444)
# Connect to the public port via the host's public address
echo hello world | nc $(hostname) $PORT
# Connect to the public port via the host's public address
echo hello world | nc $(hostname) $PORT
# Verify that the network connection worked
echo "Daemon received: $(docker logs $JOB)"
# Verify that the network connection worked
echo "Daemon received: $(docker logs $JOB)"
```
Contributing to Docker
@@ -192,10 +195,10 @@ Contribution guidelines
We are always thrilled to receive pull requests, and do our best to process them as fast as possible. Not sure if that typo is worth a pull request? Do it! We will appreciate it.
If your pull request is not accepted on the first try, don't be discouraged! If there's a problen with the implementation, hopefully you received feedback on what to improve.
If your pull request is not accepted on the first try, don't be discouraged! If there's a problem with the implementation, hopefully you received feedback on what to improve.
We're trying very hard to keep Docker lean and focused. We don't want it to do everything for everybody. This means that we might decide against incorporating a new feature.
However there might be a way to implement that feature *on top of* docker.
However, there might be a way to implement that feature *on top of* docker.
### Discuss your design on the mailing list
@@ -204,7 +207,7 @@ you in the right direction, give feedback on your design, and maybe point out if
### Create issues...
Any significant improvement should be documented as a github issue before anybody start working on it.
Any significant improvement should be documented as [a github issue](https://github.com/dotcloud/docker/issues) before anybody starts working on it.
### ...but check for existing issues first!
@@ -221,18 +224,39 @@ Golang has a great testing suite built in: use it! Take a look at existing tests
Setting up a dev environment
----------------------------
Coming soon!
Instructions that have been verified to work on Ubuntu 12.10,
```bash
sudo apt-get -y install lxc wget bsdtar curl golang git
export GOPATH=~/go/
export PATH=$GOPATH/bin:$PATH
mkdir -p $GOPATH/src/github.com/dotcloud
cd $GOPATH/src/github.com/dotcloud
git clone git@github.com:dotcloud/docker.git
cd docker
go get -v github.com/dotcloud/docker/...
go install -v github.com/dotcloud/docker/...
```
Then run the docker daemon,
```bash
sudo $GOPATH/bin/docker -d
```
Run the `go install` command (above) to recompile docker.
What is a Standard Container?
=============================
Docker defines a unit of software delivery called a Standard Container. The goal of a Standard Container is to encapsulate a software component and all its dependencies in
a format that is self-describing and portable, so that any compliant runtime can run it without extra dependency, regardless of the underlying machine and the contents of the container.
a format that is self-describing and portable, so that any compliant runtime can run it without extra dependencies, regardless of the underlying machine and the contents of the container.
The spec for Standard Containers is currently work in progress, but it is very straightforward. It mostly defines 1) an image format, 2) a set of standard operations, and 3) an execution environment.
The spec for Standard Containers is currently a work in progress, but it is very straightforward. It mostly defines 1) an image format, 2) a set of standard operations, and 3) an execution environment.
A great analogy for this is the shipping container. Just like Standard Containers are a fundamental unit of software delivery, shipping containers (http://bricks.argz.com/ins/7823-1/12) are a fundamental unit of physical delivery.

37
Vagrantfile vendored
View File

@@ -1,7 +1,7 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
Vagrant.configure("1") do |config|
def v10(config)
# All Vagrant configuration is done here. The most common configuration
# options are documented and commented below. For a complete reference,
# please see the online documentation at vagrantup.com.
@@ -20,7 +20,7 @@ Vagrant.configure("1") do |config|
# via the IP. Host-only networks can talk to the host machine as well as
# any other machines on the same network, but cannot be accessed (through this
# network interface) by any external networks.
# config.vm.network :hostonly, "192.168.33.10"
config.vm.network :hostonly, "192.168.33.10"
# Assign this VM to a bridged network, allowing you to connect directly to a
# network using the host's network device. This makes the VM appear as another
@@ -34,6 +34,9 @@ Vagrant.configure("1") do |config|
# Share an additional folder to the guest VM. The first argument is
# an identifier, the second is the path on the guest to mount the
# folder, and the third is the path on the host to the actual folder.
if not File.exist? File.expand_path '~/docker'
Dir.mkdir(File.expand_path '~/docker')
end
config.vm.share_folder "v-data", "~/docker", "~/docker"
# Enable provisioning with Puppet stand alone. Puppet manifests
@@ -99,21 +102,29 @@ Vagrant.configure("1") do |config|
# chef.validation_client_name = "ORGNAME-validator"
end
Vagrant.configure("2") do |config|
"#{Vagrant::VERSION}" < "1.1.0" and Vagrant::Config.run do |config|
v10(config)
end
"#{Vagrant::VERSION}" >= "1.1.0" and Vagrant.configure("1") do |config|
v10(config)
end
"#{Vagrant::VERSION}" >= "1.1.0" and Vagrant.configure("2") do |config|
config.vm.provider :aws do |aws|
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
config.vm.box = "dummy"
config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
aws.ssh_private_key_path = ENV["AWS_SSH_PRIVKEY"]
aws.region = "us-east-1"
aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
aws.ssh_private_key_path = ENV["AWS_SSH_PRIVKEY"]
aws.region = "us-east-1"
aws.ami = "ami-1c1e8075"
aws.ssh_username = "vagrant"
aws.instance_type = "t1.micro"
aws.instance_type = "t1.micro"
end
config.vm.provider :virtualbox do |vb|
config.vm.box = "quantal64_3.5.0-25"
config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box"
end
config.vm.box = "quantal64_3.5.0-25"
config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box"
end
end

View File

@@ -1,4 +1,4 @@
package fs
package docker
import (
"errors"
@@ -7,6 +7,8 @@ import (
"os/exec"
)
type Archive io.Reader
type Compression uint32
const (

View File

@@ -1,4 +1,4 @@
package fs
package docker
import (
"io/ioutil"

View File

@@ -8,23 +8,34 @@ import (
"io/ioutil"
"net/http"
"os"
"path"
"strings"
)
// Where we store the config file
const CONFIGFILE = "/var/lib/docker/.dockercfg"
const CONFIGFILE = ".dockercfg"
// the registry server we want to login against
const REGISTRY_SERVER = "http://registry.docker.io"
const REGISTRY_SERVER = "https://registry.docker.io"
type AuthConfig struct {
Username string `json:"username"`
Password string `json:"password"`
Email string `json:"email"`
rootPath string `json:-`
}
func NewAuthConfig(username, password, email, rootPath string) *AuthConfig {
return &AuthConfig{
Username: username,
Password: password,
Email: email,
rootPath: rootPath,
}
}
// create a base64 encoded auth string to store in config
func EncodeAuth(authConfig AuthConfig) string {
func EncodeAuth(authConfig *AuthConfig) string {
authStr := authConfig.Username + ":" + authConfig.Password
msg := []byte(authStr)
encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
@@ -33,50 +44,54 @@ func EncodeAuth(authConfig AuthConfig) string {
}
// decode the auth string
func DecodeAuth(authStr string) (AuthConfig, error) {
func DecodeAuth(authStr string) (*AuthConfig, error) {
decLen := base64.StdEncoding.DecodedLen(len(authStr))
decoded := make([]byte, decLen)
authByte := []byte(authStr)
n, err := base64.StdEncoding.Decode(decoded, authByte)
if err != nil {
return AuthConfig{}, err
return nil, err
}
if n > decLen {
return AuthConfig{}, errors.New("something went wrong decoding auth config")
return nil, fmt.Errorf("Something went wrong decoding auth config")
}
arr := strings.Split(string(decoded), ":")
if len(arr) != 2 {
return nil, fmt.Errorf("Invalid auth configuration file")
}
password := strings.Trim(arr[1], "\x00")
return AuthConfig{Username: arr[0], Password: password}, nil
return &AuthConfig{Username: arr[0], Password: password}, nil
}
// load up the auth config information and return values
func LoadConfig() (AuthConfig, error) {
if _, err := os.Stat(CONFIGFILE); err == nil {
b, err := ioutil.ReadFile(CONFIGFILE)
if err != nil {
return AuthConfig{}, err
}
arr := strings.Split(string(b), "\n")
orig_auth := strings.Split(arr[0], " = ")
orig_email := strings.Split(arr[1], " = ")
authConfig, err := DecodeAuth(orig_auth[1])
if err != nil {
return AuthConfig{}, err
}
authConfig.Email = orig_email[1]
return authConfig, nil
} else {
return AuthConfig{}, nil
// FIXME: use the internal golang config parser
func LoadConfig(rootPath string) (*AuthConfig, error) {
confFile := path.Join(rootPath, CONFIGFILE)
if _, err := os.Stat(confFile); err != nil {
return &AuthConfig{}, fmt.Errorf("The Auth config file is missing")
}
return AuthConfig{}, nil
b, err := ioutil.ReadFile(confFile)
if err != nil {
return nil, err
}
arr := strings.Split(string(b), "\n")
orig_auth := strings.Split(arr[0], " = ")
orig_email := strings.Split(arr[1], " = ")
authConfig, err := DecodeAuth(orig_auth[1])
if err != nil {
return nil, err
}
authConfig.Email = orig_email[1]
authConfig.rootPath = rootPath
return authConfig, nil
}
// save the auth config
func saveConfig(authStr string, email string) error {
func saveConfig(rootPath, authStr string, email string) error {
lines := "auth = " + authStr + "\n" + "email = " + email + "\n"
b := []byte(lines)
err := ioutil.WriteFile(CONFIGFILE, b, 0600)
err := ioutil.WriteFile(path.Join(rootPath, CONFIGFILE), b, 0600)
if err != nil {
return err
}
@@ -84,7 +99,7 @@ func saveConfig(authStr string, email string) error {
}
// try to register/login to the registry server
func Login(authConfig AuthConfig) (string, error) {
func Login(authConfig *AuthConfig) (string, error) {
storeConfig := false
reqStatusCode := 0
var status string
@@ -145,7 +160,7 @@ func Login(authConfig AuthConfig) (string, error) {
}
if storeConfig {
authStr := EncodeAuth(authConfig)
saveConfig(authStr, authConfig.Email)
saveConfig(authConfig.rootPath, authStr, authConfig.Email)
}
return status, nil
}
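
The encode/decode pair in the diff above stores credentials as a base64-encoded `username:password` string. Below is a standalone sketch of that round trip, simplified and not the exact functions from the file (the `encodeAuth`/`decodeAuth` names are illustrative):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// encodeAuth mirrors the idea in the diff: join user and password with a
// colon and base64-encode the result for storage in the config file.
func encodeAuth(username, password string) string {
	return base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
}

// decodeAuth reverses it, failing if the payload is not "user:password".
func decodeAuth(authStr string) (string, string, error) {
	decoded, err := base64.StdEncoding.DecodeString(authStr)
	if err != nil {
		return "", "", err
	}
	parts := strings.SplitN(string(decoded), ":", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("invalid auth configuration")
	}
	return parts[0], parts[1], nil
}

func main() {
	auth := encodeAuth("ken", "test")
	user, pass, err := decodeAuth(auth)
	fmt.Println(auth, user, pass, err)
}
```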

View File

@@ -5,7 +5,7 @@ import (
)
func TestEncodeAuth(t *testing.T) {
newAuthConfig := AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"}
newAuthConfig := &AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"}
authStr := EncodeAuth(newAuthConfig)
decAuthConfig, err := DecodeAuth(authStr)
if err != nil {

View File

@@ -1,4 +1,4 @@
package fs
package docker
import (
"fmt"
@@ -33,24 +33,15 @@ func (change *Change) String() string {
return fmt.Sprintf("%s %s", kind, change.Path)
}
func (store *Store) Changes(mp *Mountpoint) ([]Change, error) {
func Changes(layers []string, rw string) ([]Change, error) {
var changes []Change
image, err := store.Get(mp.Image)
if err != nil {
return nil, err
}
layers, err := image.layers()
if err != nil {
return nil, err
}
err = filepath.Walk(mp.Rw, func(path string, f os.FileInfo, err error) error {
err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
path, err = filepath.Rel(mp.Rw, path)
path, err = filepath.Rel(rw, path)
if err != nil {
return err
}
@@ -113,15 +104,3 @@ func (store *Store) Changes(mp *Mountpoint) ([]Change, error) {
}
return changes, nil
}
// Reset removes all changes to the filesystem, reverting it to its initial state.
func (mp *Mountpoint) Reset() error {
if err := os.RemoveAll(mp.Rw); err != nil {
return err
}
// We removed the RW directory itself along with its content: let's re-create an empty one.
if err := mp.createFolders(); err != nil {
return err
}
return nil
}
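
The new `Changes(layers, rw)` signature walks the container's read-write directory and rebases each path against it before classifying changes. A minimal sketch of just that walk-and-rebase step follows (the `listRelative` helper and the example directory are made up for illustration):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// listRelative walks rw and prints each entry rebased against rw — the same
// filepath.Walk + filepath.Rel pattern the Changes() hunk above uses before
// comparing entries against the image layers.
func listRelative(rw string) error {
	return filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		rel, err := filepath.Rel(rw, path)
		if err != nil {
			return err
		}
		fmt.Println("/" + rel)
		return nil
	})
}

func main() {
	if err := listRelative("/tmp"); err != nil { // any readable directory works
		fmt.Fprintln(os.Stderr, err)
	}
}
```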

File diff suppressed because it is too large

View File

@@ -3,7 +3,8 @@ package docker
import (
"encoding/json"
"errors"
"github.com/dotcloud/docker/fs"
"flag"
"fmt"
"github.com/kr/pty"
"io"
"io/ioutil"
@@ -16,40 +17,33 @@ import (
"time"
)
var sysInitPath string
func init() {
sysInitPath = SelfPath()
}
type Container struct {
Id string
Root string
root string
Id string
Created time.Time
Path string
Args []string
Config *Config
Mountpoint *fs.Mountpoint
State *State
Image string
Config *Config
State State
Image string
network *NetworkInterface
networkManager *NetworkManager
NetworkSettings *NetworkSettings
SysInitPath string
lxcConfigPath string
cmd *exec.Cmd
stdout *writeBroadcaster
stderr *writeBroadcaster
stdin io.ReadCloser
stdinPipe io.WriteCloser
SysInitPath string
cmd *exec.Cmd
stdout *writeBroadcaster
stderr *writeBroadcaster
stdin io.ReadCloser
stdinPipe io.WriteCloser
stdoutLog *os.File
stderrLog *os.File
runtime *Runtime
}
type Config struct {
@@ -57,9 +51,43 @@ type Config struct {
User string
Memory int64 // Memory limit (in bytes)
MemorySwap int64 // Total memory usage (memory + swap); set `-1' to disable swap
Detach bool
Ports []int
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
OpenStdin bool // Open stdin
Env []string
Cmd []string
Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
}
func ParseRun(args []string) (*Config, error) {
cmd := flag.NewFlagSet("", flag.ContinueOnError)
cmd.SetOutput(ioutil.Discard)
fl_user := cmd.String("u", "", "Username or UID")
fl_detach := cmd.Bool("d", false, "Detached mode: leave the container running in the background")
fl_stdin := cmd.Bool("i", false, "Keep stdin open even if not attached")
fl_tty := cmd.Bool("t", false, "Allocate a pseudo-tty")
fl_memory := cmd.Int64("m", 0, "Memory limit (in bytes)")
var fl_ports ports
cmd.Var(&fl_ports, "p", "Map a network port to the container")
var fl_env ListOpts
cmd.Var(&fl_env, "e", "Set environment variables")
if err := cmd.Parse(args); err != nil {
return nil, err
}
config := &Config{
Ports: fl_ports,
User: *fl_user,
Tty: *fl_tty,
OpenStdin: *fl_stdin,
Memory: *fl_memory,
Detach: *fl_detach,
Env: fl_env,
Cmd: cmd.Args()[1:],
Image: cmd.Arg(0),
}
return config, nil
}
type NetworkSettings struct {
@@ -69,106 +97,6 @@ type NetworkSettings struct {
PortMapping map[string]string
}
func createContainer(id string, root string, command string, args []string, image *fs.Image, config *Config, netManager *NetworkManager) (*Container, error) {
mountpoint, err := image.Mountpoint(path.Join(root, "rootfs"), path.Join(root, "rw"))
if err != nil {
return nil, err
}
container := &Container{
Id: id,
Root: root,
Created: time.Now(),
Path: command,
Args: args,
Config: config,
Image: image.Id,
Mountpoint: mountpoint,
State: newState(),
networkManager: netManager,
NetworkSettings: &NetworkSettings{},
SysInitPath: sysInitPath,
lxcConfigPath: path.Join(root, "config.lxc"),
stdout: newWriteBroadcaster(),
stderr: newWriteBroadcaster(),
}
if err := os.Mkdir(root, 0700); err != nil {
return nil, err
}
// Setup logging of stdout and stderr to disk
if stdoutLog, err := os.OpenFile(path.Join(container.Root, id+"-stdout.log"), os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600); err != nil {
return nil, err
} else {
container.stdoutLog = stdoutLog
}
if stderrLog, err := os.OpenFile(path.Join(container.Root, id+"-stderr.log"), os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600); err != nil {
return nil, err
} else {
container.stderrLog = stderrLog
}
if container.Config.OpenStdin {
container.stdin, container.stdinPipe = io.Pipe()
} else {
container.stdinPipe = NopWriteCloser(ioutil.Discard) // Silently drop stdin
}
container.stdout.AddWriter(NopWriteCloser(container.stdoutLog))
container.stderr.AddWriter(NopWriteCloser(container.stderrLog))
if err := container.save(); err != nil {
return nil, err
}
return container, nil
}
func loadContainer(store *fs.Store, containerPath string, netManager *NetworkManager) (*Container, error) {
data, err := ioutil.ReadFile(path.Join(containerPath, "config.json"))
if err != nil {
return nil, err
}
mountpoint, err := store.FetchMountpoint(
path.Join(containerPath, "rootfs"),
path.Join(containerPath, "rw"),
)
if err != nil {
return nil, err
} else if mountpoint == nil {
return nil, errors.New("Couldn't load container: unregistered mountpoint.")
}
container := &Container{
stdout: newWriteBroadcaster(),
stderr: newWriteBroadcaster(),
lxcConfigPath: path.Join(containerPath, "config.lxc"),
networkManager: netManager,
NetworkSettings: &NetworkSettings{},
Mountpoint: mountpoint,
}
// Load container settings
if err := json.Unmarshal(data, container); err != nil {
return nil, err
}
// Setup logging of stdout and stderr to disk
if stdoutLog, err := os.OpenFile(path.Join(container.Root, container.Id+"-stdout.log"), os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600); err != nil {
return nil, err
} else {
container.stdoutLog = stdoutLog
}
if stderrLog, err := os.OpenFile(path.Join(container.Root, container.Id+"-stderr.log"), os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600); err != nil {
return nil, err
} else {
container.stderrLog = stderrLog
}
container.stdout.AddWriter(NopWriteCloser(container.stdoutLog))
container.stderr.AddWriter(NopWriteCloser(container.stderrLog))
if container.Config.OpenStdin {
container.stdin, container.stdinPipe = io.Pipe()
} else {
container.stdinPipe = NopWriteCloser(ioutil.Discard) // Silently drop stdin
}
container.State = newState()
return container, nil
}
func (container *Container) Cmd() *exec.Cmd {
return container.cmd
}
@@ -177,64 +105,32 @@ func (container *Container) When() time.Time {
return container.Created
}
func (container *Container) loadUserData() (map[string]string, error) {
jsonData, err := ioutil.ReadFile(path.Join(container.Root, "userdata.json"))
if err != nil {
if os.IsNotExist(err) {
return make(map[string]string), nil
}
return nil, err
}
data := make(map[string]string)
if err := json.Unmarshal(jsonData, &data); err != nil {
return nil, err
}
return data, nil
}
func (container *Container) saveUserData(data map[string]string) error {
jsonData, err := json.Marshal(data)
func (container *Container) FromDisk() error {
data, err := ioutil.ReadFile(container.jsonPath())
if err != nil {
return err
}
return ioutil.WriteFile(path.Join(container.Root, "userdata.json"), jsonData, 0700)
}
func (container *Container) SetUserData(key, value string) error {
data, err := container.loadUserData()
if err != nil {
// Load container settings
if err := json.Unmarshal(data, container); err != nil {
return err
}
data[key] = value
return container.saveUserData(data)
return nil
}
func (container *Container) GetUserData(key string) string {
data, err := container.loadUserData()
if err != nil {
return ""
}
if value, exists := data[key]; exists {
return value
}
return ""
}
func (container *Container) save() (err error) {
func (container *Container) ToDisk() (err error) {
data, err := json.Marshal(container)
if err != nil {
return
}
return ioutil.WriteFile(path.Join(container.Root, "config.json"), data, 0666)
return ioutil.WriteFile(container.jsonPath(), data, 0666)
}
func (container *Container) generateLXCConfig() error {
fo, err := os.Create(container.lxcConfigPath)
fo, err := os.Create(container.lxcConfigPath())
if err != nil {
return err
}
defer fo.Close()
if err := LxcTemplateCompiled.Execute(fo, container); err != nil {
return err
}
@@ -309,7 +205,7 @@ func (container *Container) start() error {
}
func (container *Container) Start() error {
if err := container.Mountpoint.EnsureMounted(); err != nil {
if err := container.EnsureMounted(); err != nil {
return err
}
if err := container.allocateNetwork(); err != nil {
@@ -320,7 +216,7 @@ func (container *Container) Start() error {
}
params := []string{
"-n", container.Id,
"-f", container.lxcConfigPath,
"-f", container.lxcConfigPath(),
"--",
"/sbin/init",
}
@@ -339,6 +235,15 @@ func (container *Container) Start() error {
container.cmd = exec.Command("/usr/bin/lxc-start", params...)
// Setup environment
container.cmd.Env = append(
[]string{
"HOME=/",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
},
container.Config.Env...,
)
var err error
if container.Config.Tty {
err = container.startPty()
@@ -348,8 +253,10 @@ func (container *Container) Start() error {
if err != nil {
return err
}
// FIXME: save state on disk *first*, then converge
// this way disk state is used as a journal, eg. we can restore after crash etc.
container.State.setRunning(container.cmd.Process.Pid)
container.save()
container.ToDisk()
go container.monitor()
return nil
}
@@ -389,30 +296,14 @@ func (container *Container) StdoutPipe() (io.ReadCloser, error) {
return newBufReader(reader), nil
}
func (container *Container) StdoutLog() io.Reader {
r, err := os.Open(container.stdoutLog.Name())
if err != nil {
return nil
}
return r
}
func (container *Container) StderrPipe() (io.ReadCloser, error) {
reader, writer := io.Pipe()
container.stderr.AddWriter(writer)
return newBufReader(reader), nil
}
func (container *Container) StderrLog() io.Reader {
r, err := os.Open(container.stderrLog.Name())
if err != nil {
return nil
}
return r
}
func (container *Container) allocateNetwork() error {
iface, err := container.networkManager.Allocate()
iface, err := container.runtime.networkManager.Allocate()
if err != nil {
return err
}
@@ -450,7 +341,7 @@ func (container *Container) monitor() {
}
container.stdout.Close()
container.stderr.Close()
if err := container.Mountpoint.Umount(); err != nil {
if err := container.Unmount(); err != nil {
log.Printf("%v: Failed to umount filesystem: %v", container.Id, err)
}
@@ -461,7 +352,7 @@ func (container *Container) monitor() {
// Report status back
container.State.setStopped(exitCode)
container.save()
container.ToDisk()
}
func (container *Container) kill() error {
@@ -523,6 +414,17 @@ func (container *Container) Wait() int {
return container.State.ExitCode
}
func (container *Container) ExportRw() (Archive, error) {
return Tar(container.rwPath(), Uncompressed)
}
func (container *Container) Export() (Archive, error) {
if err := container.EnsureMounted(); err != nil {
return nil, err
}
return Tar(container.RootfsPath(), Uncompressed)
}
func (container *Container) WaitTimeout(timeout time.Duration) error {
done := make(chan bool)
go func() {
@@ -538,3 +440,75 @@ func (container *Container) WaitTimeout(timeout time.Duration) error {
}
return nil
}
func (container *Container) EnsureMounted() error {
if mounted, err := container.Mounted(); err != nil {
return err
} else if mounted {
return nil
}
return container.Mount()
}
func (container *Container) Mount() error {
image, err := container.GetImage()
if err != nil {
return err
}
return image.Mount(container.RootfsPath(), container.rwPath())
}
func (container *Container) Changes() ([]Change, error) {
image, err := container.GetImage()
if err != nil {
return nil, err
}
return image.Changes(container.rwPath())
}
func (container *Container) GetImage() (*Image, error) {
if container.runtime == nil {
return nil, fmt.Errorf("Can't get image of unregistered container")
}
return container.runtime.graph.Get(container.Image)
}
func (container *Container) Mounted() (bool, error) {
return Mounted(container.RootfsPath())
}
func (container *Container) Unmount() error {
return Unmount(container.RootfsPath())
}
func (container *Container) logPath(name string) string {
return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.Id, name))
}
func (container *Container) ReadLog(name string) (io.Reader, error) {
return os.Open(container.logPath(name))
}
func (container *Container) jsonPath() string {
return path.Join(container.root, "config.json")
}
func (container *Container) lxcConfigPath() string {
return path.Join(container.root, "config.lxc")
}
// This method must be exported to be used from the lxc template
func (container *Container) RootfsPath() string {
return path.Join(container.root, "rootfs")
}
func (container *Container) rwPath() string {
return path.Join(container.root, "rw")
}
func validateId(id string) error {
if id == "" {
return fmt.Errorf("Invalid empty id")
}
return nil
}
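
The `ParseRun` function added in this file parses `docker run` arguments into a `Config` using a throwaway `flag.FlagSet`. Here is a cut-down sketch of the same pattern with a reduced set of fields and flags (the `runConfig`/`parseRun` names are illustrative, not from the diff):

```go
package main

import (
	"flag"
	"fmt"
	"io/ioutil"
)

// runConfig is a cut-down stand-in for the Config struct added in this diff.
type runConfig struct {
	Tty       bool
	OpenStdin bool
	Detach    bool
	Image     string
	Cmd       []string
}

// parseRun mirrors the ParseRun approach: a private FlagSet that swallows its
// own output, with the first positional argument as the image and the rest as
// the command.
func parseRun(args []string) (*runConfig, error) {
	cmd := flag.NewFlagSet("", flag.ContinueOnError)
	cmd.SetOutput(ioutil.Discard)
	tty := cmd.Bool("t", false, "Allocate a pseudo-tty")
	stdin := cmd.Bool("i", false, "Keep stdin open even if not attached")
	detach := cmd.Bool("d", false, "Detached mode")
	if err := cmd.Parse(args); err != nil {
		return nil, err
	}
	var command []string
	if rest := cmd.Args(); len(rest) > 1 {
		command = rest[1:]
	}
	return &runConfig{
		Tty:       *tty,
		OpenStdin: *stdin,
		Detach:    *detach,
		Image:     cmd.Arg(0),
		Cmd:       command,
	}, nil
}

func main() {
	config, err := parseRun([]string{"-i", "-t", "base", "/bin/bash"})
	fmt.Printf("%+v %v\n", config, err)
}
```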

View File

@@ -3,7 +3,6 @@ package docker
import (
"bufio"
"fmt"
"github.com/dotcloud/docker/fs"
"io"
"io/ioutil"
"math/rand"
@@ -15,24 +14,22 @@ import (
)
func TestCommitRun(t *testing.T) {
docker, err := newTestDocker()
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(docker)
container1, err := docker.Create(
"precommit_test",
"/bin/sh",
[]string{"-c", "echo hello > /world"},
GetTestImage(docker),
defer nuke(runtime)
container1, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/sh", "-c", "echo hello > /world"},
Memory: 33554432,
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(container1)
defer runtime.Destroy(container1)
if container1.State.Running {
t.Errorf("Container shouldn't be running")
@@ -44,37 +41,28 @@ func TestCommitRun(t *testing.T) {
t.Errorf("Container shouldn't be running")
}
// FIXME: freeze the container before copying it to avoid data corruption?
rwTar, err := fs.Tar(container1.Mountpoint.Rw, fs.Uncompressed)
rwTar, err := container1.ExportRw()
if err != nil {
t.Error(err)
}
// Create a new image from the container's base layers + a new layer from container changes
parentImg, err := docker.Store.Get(container1.Image)
if err != nil {
t.Error(err)
}
img, err := docker.Store.Create(rwTar, parentImg, "test_commitrun", "unit test commited image")
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image")
if err != nil {
t.Error(err)
}
// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world
container2, err := docker.Create(
"postcommit_test",
"cat",
[]string{"/world"},
img,
container2, err := runtime.Create(
&Config{
Image: img.Id,
Memory: 33554432,
Cmd: []string{"cat", "/world"},
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(container2)
defer runtime.Destroy(container2)
stdout, err := container2.StdoutPipe()
stderr, err := container2.StderrPipe()
@@ -92,24 +80,22 @@ func TestCommitRun(t *testing.T) {
}
func TestRun(t *testing.T) {
docker, err := newTestDocker()
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(docker)
container, err := docker.Create(
"run_test",
"ls",
[]string{"-al"},
GetTestImage(docker),
defer nuke(runtime)
container, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).Id,
Memory: 33554432,
Cmd: []string{"ls", "-al"},
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(container)
defer runtime.Destroy(container)
if container.State.Running {
t.Errorf("Container shouldn't be running")
@@ -123,22 +109,21 @@ func TestRun(t *testing.T) {
}
func TestOutput(t *testing.T) {
docker, err := newTestDocker()
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(docker)
container, err := docker.Create(
"output_test",
"echo",
[]string{"-n", "foobar"},
GetTestImage(docker),
&Config{},
defer nuke(runtime)
container, err := runtime.Create(
&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"echo", "-n", "foobar"},
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(container)
defer runtime.Destroy(container)
output, err := container.Output()
if err != nil {
t.Fatal(err)
@@ -149,22 +134,20 @@ func TestOutput(t *testing.T) {
}
func TestKill(t *testing.T) {
docker, err := newTestDocker()
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(docker)
container, err := docker.Create(
"stop_test",
"cat",
[]string{"/dev/zero"},
GetTestImage(docker),
&Config{},
defer nuke(runtime)
container, err := runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"cat", "/dev/zero"},
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(container)
defer runtime.Destroy(container)
if container.State.Running {
t.Errorf("Container shouldn't be running")
@@ -192,38 +175,35 @@ func TestKill(t *testing.T) {
}
func TestExitCode(t *testing.T) {
docker, err := newTestDocker()
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(docker)
defer nuke(runtime)
trueContainer, err := docker.Create(
"exit_test_1",
"/bin/true",
[]string{""},
GetTestImage(docker),
&Config{},
trueContainer, err := runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/true", ""},
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(trueContainer)
defer runtime.Destroy(trueContainer)
if err := trueContainer.Run(); err != nil {
t.Fatal(err)
}
falseContainer, err := docker.Create(
"exit_test_2",
"/bin/false",
[]string{""},
GetTestImage(docker),
&Config{},
falseContainer, err := runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/false", ""},
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(falseContainer)
defer runtime.Destroy(falseContainer)
if err := falseContainer.Run(); err != nil {
t.Fatal(err)
}
@@ -238,22 +218,20 @@ func TestExitCode(t *testing.T) {
}
func TestRestart(t *testing.T) {
docker, err := newTestDocker()
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(docker)
container, err := docker.Create(
"restart_test",
"echo",
[]string{"-n", "foobar"},
GetTestImage(docker),
&Config{},
defer nuke(runtime)
container, err := runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"echo", "-n", "foobar"},
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(container)
defer runtime.Destroy(container)
output, err := container.Output()
if err != nil {
t.Fatal(err)
@@ -273,24 +251,22 @@ func TestRestart(t *testing.T) {
}
func TestRestartStdin(t *testing.T) {
docker, err := newTestDocker()
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(docker)
container, err := docker.Create(
"restart_stdin_test",
"cat",
[]string{},
GetTestImage(docker),
&Config{
OpenStdin: true,
},
defer nuke(runtime)
container, err := runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"cat"},
OpenStdin: true,
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(container)
defer runtime.Destroy(container)
stdin, err := container.StdinPipe()
stdout, err := container.StdoutPipe()
@@ -323,24 +299,22 @@ func TestRestartStdin(t *testing.T) {
}
func TestUser(t *testing.T) {
docker, err := newTestDocker()
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(docker)
defer nuke(runtime)
// Default user must be root
container, err := docker.Create(
"user_default",
"id",
[]string{},
GetTestImage(docker),
&Config{},
container, err := runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"id"},
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(container)
defer runtime.Destroy(container)
output, err := container.Output()
if err != nil {
t.Fatal(err)
@@ -350,19 +324,17 @@ func TestUser(t *testing.T) {
}
// Set a username
container, err = docker.Create(
"user_root",
"id",
[]string{},
GetTestImage(docker),
&Config{
User: "root",
},
container, err = runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"id"},
User: "root",
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(container)
defer runtime.Destroy(container)
output, err = container.Output()
if err != nil || container.State.ExitCode != 0 {
t.Fatal(err)
@@ -372,19 +344,17 @@ func TestUser(t *testing.T) {
}
// Set a UID
container, err = docker.Create(
"user_uid0",
"id",
[]string{},
GetTestImage(docker),
&Config{
User: "0",
},
container, err = runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"id"},
User: "0",
},
)
if err != nil || container.State.ExitCode != 0 {
t.Fatal(err)
}
defer docker.Destroy(container)
defer runtime.Destroy(container)
output, err = container.Output()
if err != nil || container.State.ExitCode != 0 {
t.Fatal(err)
@@ -394,19 +364,17 @@ func TestUser(t *testing.T) {
}
// Set a different user by uid
container, err = docker.Create(
"user_uid1",
"id",
[]string{},
GetTestImage(docker),
&Config{
User: "1",
},
container, err = runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"id"},
User: "1",
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(container)
defer runtime.Destroy(container)
output, err = container.Output()
if err != nil {
t.Fatal(err)
@@ -418,19 +386,17 @@ func TestUser(t *testing.T) {
}
// Set a different user by username
container, err = docker.Create(
"user_daemon",
"id",
[]string{},
GetTestImage(docker),
&Config{
User: "daemon",
},
container, err = runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"id"},
User: "daemon",
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(container)
defer runtime.Destroy(container)
output, err = container.Output()
if err != nil || container.State.ExitCode != 0 {
t.Fatal(err)
@@ -441,35 +407,31 @@ func TestUser(t *testing.T) {
}
func TestMultipleContainers(t *testing.T) {
docker, err := newTestDocker()
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(docker)
defer nuke(runtime)
container1, err := docker.Create(
"container1",
"cat",
[]string{"/dev/zero"},
GetTestImage(docker),
&Config{},
container1, err := runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"cat", "/dev/zero"},
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(container1)
defer runtime.Destroy(container1)
container2, err := docker.Create(
"container2",
"cat",
[]string{"/dev/zero"},
GetTestImage(docker),
&Config{},
container2, err := runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"cat", "/dev/zero"},
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(container2)
defer runtime.Destroy(container2)
// Start both containers
if err := container1.Start(); err != nil {
@@ -498,24 +460,22 @@ func TestMultipleContainers(t *testing.T) {
}
func TestStdin(t *testing.T) {
docker, err := newTestDocker()
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(docker)
container, err := docker.Create(
"stdin_test",
"cat",
[]string{},
GetTestImage(docker),
&Config{
OpenStdin: true,
},
defer nuke(runtime)
container, err := runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"cat"},
OpenStdin: true,
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(container)
defer runtime.Destroy(container)
stdin, err := container.StdinPipe()
stdout, err := container.StdoutPipe()
@@ -534,24 +494,22 @@ func TestStdin(t *testing.T) {
}
func TestTty(t *testing.T) {
docker, err := newTestDocker()
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(docker)
container, err := docker.Create(
"tty_test",
"cat",
[]string{},
GetTestImage(docker),
&Config{
OpenStdin: true,
},
defer nuke(runtime)
container, err := runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"cat"},
OpenStdin: true,
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(container)
defer runtime.Destroy(container)
stdin, err := container.StdinPipe()
stdout, err := container.StdoutPipe()
@@ -570,22 +528,20 @@ func TestTty(t *testing.T) {
}
func TestEnv(t *testing.T) {
docker, err := newTestDocker()
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(docker)
container, err := docker.Create(
"env_test",
"/usr/bin/env",
[]string{},
GetTestImage(docker),
&Config{},
defer nuke(runtime)
container, err := runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/usr/bin/env"},
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(container)
defer runtime.Destroy(container)
stdout, err := container.StdoutPipe()
if err != nil {
t.Fatal(err)
@@ -640,56 +596,52 @@ func grepFile(t *testing.T, path string, pattern string) {
}
func TestLXCConfig(t *testing.T) {
docker, err := newTestDocker()
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(docker)
defer nuke(runtime)
// Memory is allocated randomly for testing
rand.Seed(time.Now().UTC().UnixNano())
memMin := 33554432
memMax := 536870912
mem := memMin + rand.Intn(memMax-memMin)
container, err := docker.Create(
"config_test",
"/bin/true",
[]string{},
GetTestImage(docker),
&Config{
Hostname: "foobar",
Memory: int64(mem),
},
container, err := runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"/bin/true"},
Hostname: "foobar",
Memory: int64(mem),
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(container)
defer runtime.Destroy(container)
container.generateLXCConfig()
grepFile(t, container.lxcConfigPath, "lxc.utsname = foobar")
grepFile(t, container.lxcConfigPath,
grepFile(t, container.lxcConfigPath(), "lxc.utsname = foobar")
grepFile(t, container.lxcConfigPath(),
fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem))
grepFile(t, container.lxcConfigPath,
grepFile(t, container.lxcConfigPath(),
fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2))
}
func BenchmarkRunSequencial(b *testing.B) {
docker, err := newTestDocker()
runtime, err := newTestRuntime()
if err != nil {
b.Fatal(err)
}
defer nuke(docker)
defer nuke(runtime)
for i := 0; i < b.N; i++ {
container, err := docker.Create(
fmt.Sprintf("bench_%v", i),
"echo",
[]string{"-n", "foo"},
GetTestImage(docker),
&Config{},
container, err := runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"echo", "-n", "foo"},
},
)
if err != nil {
b.Fatal(err)
}
defer docker.Destroy(container)
defer runtime.Destroy(container)
output, err := container.Output()
if err != nil {
b.Fatal(err)
@@ -697,18 +649,18 @@ func BenchmarkRunSequencial(b *testing.B) {
if string(output) != "foo" {
b.Fatalf("Unexecpted output: %v", string(output))
}
if err := docker.Destroy(container); err != nil {
if err := runtime.Destroy(container); err != nil {
b.Fatal(err)
}
}
}
func BenchmarkRunParallel(b *testing.B) {
docker, err := newTestDocker()
runtime, err := newTestRuntime()
if err != nil {
b.Fatal(err)
}
defer nuke(docker)
defer nuke(runtime)
var tasks []chan error
@@ -716,18 +668,16 @@ func BenchmarkRunParallel(b *testing.B) {
complete := make(chan error)
tasks = append(tasks, complete)
go func(i int, complete chan error) {
container, err := docker.Create(
fmt.Sprintf("bench_%v", i),
"echo",
[]string{"-n", "foo"},
GetTestImage(docker),
&Config{},
container, err := runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"echo", "-n", "foo"},
},
)
if err != nil {
complete <- err
return
}
defer docker.Destroy(container)
defer runtime.Destroy(container)
if err := container.Start(); err != nil {
complete <- err
return
@@ -739,7 +689,7 @@ func BenchmarkRunParallel(b *testing.B) {
// if string(output) != "foo" {
// complete <- fmt.Errorf("Unexpected output: %v", string(output))
// }
if err := docker.Destroy(container); err != nil {
if err := runtime.Destroy(container); err != nil {
complete <- err
return
}


@@ -8,13 +8,13 @@ GITHUB_PATH=src/github.com/dotcloud/docker
INSDIR=usr/bin
SOURCE_PACKAGE=$(PKG_NAME)_$(PKG_VERSION).orig.tar.gz
DEB_PACKAGE=$(PKG_NAME)_$(PKG_VERSION)_$(PKG_ARCH).deb
EXTRA_GO_PKG=fs auth
EXTRA_GO_PKG=./auth
TMPDIR=$(shell mktemp -d -t XXXXXX)
# Build a debian source package
all: build_in_deb
all: clean build_in_deb
build_in_deb:
echo "GOPATH = " $(ROOT_PATH)
@@ -29,7 +29,7 @@ install:
install -m 0755 bin/docker $(DESTDIR)/$(INSDIR)
install -o root -m 0755 etc/docker.upstart $(DESTDIR)/etc/init/docker.conf
$(BUILD_SRC): cleanup
$(BUILD_SRC): clean
# Copy ourselves into $BUILD_SRC to comply with unusual golang constraints
tar --exclude=*.tar.gz --exclude=checkout.tgz -f checkout.tgz -cz *
mkdir -p $(BUILD_SRC)/$(GITHUB_PATH)
@@ -63,22 +63,11 @@ build_local:
gotest:
@echo "\033[36m[Testing]\033[00m docker..."
@sudo -E GOPATH=$(ROOT_PATH)/$(BUILD_SRC) go test -v && \
@sudo -E GOPATH=$(ROOT_PATH)/$(BUILD_SRC) go test -v . $(EXTRA_GO_PKG) && \
echo -n "\033[32m[OK]\033[00m" || \
echo -n "\033[31m[FAIL]\033[00m"; \
echo " docker"
@echo "Testing extra repos {$(EXTRA_GO_PKG)}"
@for package in $(EXTRA_GO_PKG); do \
echo "\033[36m[Testing]\033[00m docker/$$package..." && \
cd $$package ; \
sudo -E GOPATH=$(ROOT_PATH)/$(BUILD_SRC) go test -v && \
echo -n "\033[32m[OK]\033[00m" || \
echo -n "\033[31m[FAIL]\033[00m" ; \
echo " docker/$$package" ; \
cd .. ;\
done
@sudo rm -rf /tmp/docker-*
cleanup:
rm -rf $(BUILD_PATH) debian/$(PKG_NAME)* debian/files $(BUILD_SRC) checkout.tgz
clean:
rm -rf $(BUILD_PATH) debian/$(PKG_NAME)* debian/files $(BUILD_SRC) checkout.tgz bin

docker.go

@@ -1,161 +0,0 @@
package docker
import (
"container/list"
"fmt"
"github.com/dotcloud/docker/fs"
"io/ioutil"
"log"
"os"
"path"
"sort"
)
type Docker struct {
root string
repository string
containers *list.List
networkManager *NetworkManager
Store *fs.Store
}
func (docker *Docker) List() []*Container {
containers := new(History)
for e := docker.containers.Front(); e != nil; e = e.Next() {
containers.Add(e.Value.(*Container))
}
return *containers
}
func (docker *Docker) getContainerElement(id string) *list.Element {
for e := docker.containers.Front(); e != nil; e = e.Next() {
container := e.Value.(*Container)
if container.Id == id {
return e
}
}
return nil
}
func (docker *Docker) Get(id string) *Container {
e := docker.getContainerElement(id)
if e == nil {
return nil
}
return e.Value.(*Container)
}
func (docker *Docker) Exists(id string) bool {
return docker.Get(id) != nil
}
func (docker *Docker) Create(id string, command string, args []string, image *fs.Image, config *Config) (*Container, error) {
if docker.Exists(id) {
return nil, fmt.Errorf("Container %v already exists", id)
}
root := path.Join(docker.repository, id)
container, err := createContainer(id, root, command, args, image, config, docker.networkManager)
if err != nil {
return nil, err
}
docker.containers.PushBack(container)
return container, nil
}
func (docker *Docker) Destroy(container *Container) error {
element := docker.getContainerElement(container.Id)
if element == nil {
return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.Id)
}
if err := container.Stop(); err != nil {
return err
}
if container.Mountpoint.Mounted() {
if err := container.Mountpoint.Umount(); err != nil {
return fmt.Errorf("Unable to umount container %v: %v", container.Id, err)
}
}
if err := container.Mountpoint.Deregister(); err != nil {
return fmt.Errorf("Unable to deregiser -- ? mountpoint %v: %v", container.Mountpoint.Root, err)
}
if err := os.RemoveAll(container.Root); err != nil {
return fmt.Errorf("Unable to remove filesystem for %v: %v", container.Id, err)
}
docker.containers.Remove(element)
return nil
}
func (docker *Docker) restore() error {
dir, err := ioutil.ReadDir(docker.repository)
if err != nil {
return err
}
for _, v := range dir {
container, err := loadContainer(docker.Store, path.Join(docker.repository, v.Name()), docker.networkManager)
if err != nil {
log.Printf("Failed to load container %v: %v", v.Name(), err)
continue
}
docker.containers.PushBack(container)
}
return nil
}
func New() (*Docker, error) {
return NewFromDirectory("/var/lib/docker")
}
func NewFromDirectory(root string) (*Docker, error) {
docker_repo := path.Join(root, "containers")
if err := os.MkdirAll(docker_repo, 0700); err != nil && !os.IsExist(err) {
return nil, err
}
store, err := fs.New(path.Join(root, "images"))
if err != nil {
return nil, err
}
netManager, err := newNetworkManager(networkBridgeIface)
if err != nil {
return nil, err
}
docker := &Docker{
root: root,
repository: docker_repo,
containers: list.New(),
Store: store,
networkManager: netManager,
}
if err := docker.restore(); err != nil {
return nil, err
}
return docker, nil
}
type History []*Container
func (history *History) Len() int {
return len(*history)
}
func (history *History) Less(i, j int) bool {
containers := *history
return containers[j].When().Before(containers[i].When())
}
func (history *History) Swap(i, j int) {
containers := *history
tmp := containers[i]
containers[i] = containers[j]
containers[j] = tmp
}
func (history *History) Add(container *Container) {
*history = append(*history, container)
sort.Sort(history)
}


@@ -3,7 +3,6 @@ package main
import (
"flag"
"github.com/dotcloud/docker"
"github.com/dotcloud/docker/future"
"github.com/dotcloud/docker/rcli"
"github.com/dotcloud/docker/term"
"io"
@@ -17,8 +16,11 @@ func main() {
docker.SysInit()
return
}
// FIXME: Switch d and D ? (to be more sshd like)
fl_daemon := flag.Bool("d", false, "Daemon mode")
fl_debug := flag.Bool("D", false, "Debug mode")
flag.Parse()
rcli.DEBUG_FLAG = *fl_debug
if *fl_daemon {
if flag.NArg() != 0 {
flag.Usage()
@@ -57,11 +59,11 @@ func runCommand(args []string) error {
// closing the connection.
// See http://code.google.com/p/go/issues/detail?id=3345
if conn, err := rcli.Call("tcp", "127.0.0.1:4242", args...); err == nil {
receive_stdout := future.Go(func() error {
receive_stdout := docker.Go(func() error {
_, err := io.Copy(os.Stdout, conn)
return err
})
send_stdin := future.Go(func() error {
send_stdin := docker.Go(func() error {
_, err := io.Copy(conn, os.Stdin)
if err := conn.CloseWrite(); err != nil {
log.Printf("Couldn't send EOF: " + err.Error())

docs/README.md

@@ -0,0 +1,4 @@
Docs readme
===========
Let's see if this file shows up as we hope it does.


@@ -0,0 +1,220 @@
Docker concepts
===============
Image
-----
An image is a root filesystem + some metadata. It is uniquely identified by a SHA256 hash, and it can be given a symbolic name as well (so you can `docker run mystuff` instead of `docker run 819f04e5706f5...`).
The metadata is a JSON blob. It contains at least:
- the hash of the parent (only if the image is based on another image; if it was created from scratch, then the parent is `null`),
- the creation date (this is when the `docker commit` command was run).
The hash of the image is defined as:
`SHA256(SHA256(jsondata)+SHA256(tarball))`
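To make the formula concrete, here is a minimal Go sketch. The exact byte-level combination isn't spelled out above, so this assumes the two inner digests are hex-encoded and concatenated as strings before the outer hash; treat it as an illustration, not the reference implementation, and the sample metadata fields are made up.

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// sha256Hex returns the hex-encoded SHA256 digest of data.
func sha256Hex(data []byte) string {
	return fmt.Sprintf("%x", sha256.Sum256(data))
}

// imageId applies SHA256(SHA256(jsondata)+SHA256(tarball)),
// interpreting "+" as concatenation of the two hex digests (an assumption).
func imageId(jsondata, tarball []byte) string {
	return sha256Hex([]byte(sha256Hex(jsondata) + sha256Hex(tarball)))
}

func main() {
	metadata := []byte(`{"parent": null, "created": "2013-03-23T17:48:18Z"}`)
	layer := []byte("layer tarball bytes")
	fmt.Println(imageId(metadata, layer))
}
```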
When you run something into an image, it automatically creates a container. The container has a unique ID, and when you `docker commit <container_id> mystuff`, you are creating a new image, and giving it the nickname `mystuff`.
Repository
----------
A repository:
- belongs to a specific user,
- has a given name chosen by the user,
- is a set of tagged images.
The typical use case is to group different versions of something under a repository.
Example: you are John Doe, maintainer of a collection of PostgreSQL images based on different releases of Ubuntu. Your docker ID is `jdoe`; you decide that the repository name will be `pgsql`. You pull a bunch of base images for the different Ubuntu releases, then you set up different versions of PostgreSQL in them. You end up with the following set of images:
- a base lucid image,
- a base precise image,
- a base quantal image,
- PostgreSQL 9.1 installed on top of the lucid image,
- PostgreSQL 9.2 installed on top of the lucid image,
- PostgreSQL 9.1 installed on top of the precise image,
- PostgreSQL 9.2 installed on top of the precise image,
- PostgreSQL 9.1 installed on top of the quantal image,
- PostgreSQL 9.2 installed on top of the quantal image,
- PostgreSQL 9.3 installed on top of the quantal image.
The first three won't be in the repository, but the other ones will. You decide that the tags will be lucid9.1, lucid9.2, precise9.1, etc.
Note: those images do not have to share a common ancestor. In this case, we have three "root" images (one for each base Ubuntu release).
When someone wants to use one of your images, he will do something like:
docker run -p 5432 jdoe/pgsql@lucid9.2 postgres -D /var/lib/...
Docker will do the following (see the sketch after this list):
- notice that the image name contains a slash, and is therefore a reference to a repository;
- notice that the image name contains an arroba (@), and is therefore a reference to a specific version;
- query the docker registry to resolve jdoe/pgsql@lucid9.2 into an image ID;
- download the image metadata+tarball from the registry (unless it already has them locally);
- recursively download all the parent images of the image (unless it already has them locally);
- run the image.
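A minimal sketch of the name-parsing step from the list above (not the actual docker code): split on `@` for the version tag and on `/` for the user/repository part, falling back to `latest` when no version is given.

```go
package main

import (
	"fmt"
	"strings"
)

// parseImageName splits a name like "jdoe/pgsql@lucid9.2" into its parts:
// a slash means "repository of a user", an "@" means "specific version".
// Names without "@" imply the special "latest" version.
func parseImageName(name string) (user, repo, tag string) {
	tag = "latest"
	if i := strings.LastIndex(name, "@"); i != -1 {
		name, tag = name[:i], name[i+1:]
	}
	if i := strings.Index(name, "/"); i != -1 {
		user, repo = name[:i], name[i+1:]
	} else {
		repo = name // e.g. "base": not tied to a user repository
	}
	return user, repo, tag
}

func main() {
	fmt.Println(parseImageName("jdoe/pgsql@lucid9.2")) // jdoe pgsql lucid9.2
	fmt.Println(parseImageName("base"))                // (empty user) base latest
}
```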
There is one special version: `latest`. When you don't request a specific version, you are implying that you want the `latest` version. When you push a version (any version!) to the repository, you are also pushing to `latest`.
QUESTION: do we want to update `latest` even if the commit date of the image is older than the current `latest` image?
QUESTION: who should update `latest`? Should it be done by the docker client, or automatically done server-side?
Confused?
---------
Another way to put it: a "repository" is like the "download binaries" page for a given product of a software vendor. Once version 1.42.5 is there, it probably won't be modified (they will release 1.42.6 instead), unless there was something really harmful or embarrassing in 1.42.5.
Storage of images
-----------------
Images are to be stored on S3.
A given image will be mapped to two S3 objects:
- s3://get.docker.io/images/<id>/json (plain JSON file)
- s3://get.docker.io/images/<id>/layer (tarball)
The S3 storage is authoritative, i.e. the registry will probably keep some cache of the metadata, but it will be just a cache.
Storage of repositories
-----------------------
TBD
Pull images
-----------
Pulling an image is fairly straightforward:
GET /v1/images/<id>/json
GET /v1/images/<id>/layer
GET /v1/images/<id>/history
The first two calls redirect you to their S3 counterparts. But before redirecting you, the registry checks (probably with `HEAD` requests) that both `json` and `layer` objects actually exist on S3. I.e., if there was a partial upload, when you try to `GET` the `json` or the `layer` object, the registry will give you a 404 for both objects, even if one of them does exist.
The last one sends you a JSON payload, which is a list containing all the metadata of the image and all its ancestors. The requested image comes first, then its parent, then the parent of the parent, etc.
SUGGESTION: rename `history` to `ancestry` (it sounds more hipstery, but it's actually more accurate)
SUGGESTION: add optional parameter `?length=X` to `history`, so you can limit to `X` ancestors, and avoid pulling 42000 ancestors in one go - especially if you have most of them already...
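As an illustration, a client-side pull could look roughly like this in Go. The registry base URL is a placeholder, no error statuses are handled, and a real client would untar the layer into its local store instead of discarding it.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

// registry is a placeholder base URL, not the real endpoint.
const registry = "https://registry.example.com"

// pullImage fetches the metadata, the layer and the ancestry of one image,
// following the three GET calls above. http.Get follows the S3 redirects
// transparently.
func pullImage(id string) error {
	meta, err := http.Get(registry + "/v1/images/" + id + "/json")
	if err != nil {
		return err
	}
	defer meta.Body.Close()
	metadata := make(map[string]interface{})
	if err := json.NewDecoder(meta.Body).Decode(&metadata); err != nil {
		return err
	}

	layer, err := http.Get(registry + "/v1/images/" + id + "/layer")
	if err != nil {
		return err
	}
	defer layer.Body.Close()
	// A real client would untar this into its image store.
	if _, err := io.Copy(ioutil.Discard, layer.Body); err != nil {
		return err
	}

	hist, err := http.Get(registry + "/v1/images/" + id + "/history")
	if err != nil {
		return err
	}
	defer hist.Body.Close()
	var ancestry []map[string]interface{} // requested image first, then its parents
	return json.NewDecoder(hist.Body).Decode(&ancestry)
}

func main() {
	if err := pullImage("819f04e5706f5"); err != nil {
		fmt.Println("pull failed:", err)
	}
}
```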
Push images
-----------
The first thing is to push the meta data:
PUT /v1/images/<id>/json
Four things can happen:
- invalid/empty JSON: the server tells you to go away (HTTP 400?)
- image already exists with the same JSON: the server tells you that it's fine (HTTP 204?)
- image already exists but is different: the server informs you that something's wrong (?)
- image doesn't exist: the server puts the JSON on S3, then generates an upload URL for the tarball, and sends you an HTTP 200 containing this upload URL
In the latter case, you want to move to the next step:
PUT the tarball to whatever-URL-you-got-on-previous-stage
SUGGESTION: consider a `PUT /v1/images/<id>/layer` with `Expect: 100-continue` and honor a 301/302 redirect. This might or might not be legal HTTP.
The last thing is to try to push the parent image (unless you're sure that it is already in the registry). If the image is already there, stop. If it's not there, upload it, and recursively upload its parents in a similar fashion.
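A rough client-side counterpart in Go (again with a placeholder registry URL): PUT the JSON, interpret the status code, then PUT the tarball to whatever upload URL comes back. The exact status codes are still open questions above, so the checks here are assumptions.

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

// registry is a placeholder base URL, not the real endpoint.
const registry = "https://registry.example.com"

// pushImage uploads the metadata, then the layer tarball, as described above.
func pushImage(id string, jsondata, tarball []byte) error {
	req, err := http.NewRequest("PUT", registry+"/v1/images/"+id+"/json", bytes.NewReader(jsondata))
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode == 204 {
		return nil // same image is already there: nothing left to do
	}
	if resp.StatusCode != 200 {
		return fmt.Errorf("metadata upload refused: %s", resp.Status)
	}
	// The 200 body is assumed to contain the S3 upload URL for the tarball.
	uploadUrl, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	up, err := http.NewRequest("PUT", string(uploadUrl), bytes.NewReader(tarball))
	if err != nil {
		return err
	}
	upResp, err := http.DefaultClient.Do(up)
	if err != nil {
		return err
	}
	defer upResp.Body.Close()
	if upResp.StatusCode >= 400 {
		return fmt.Errorf("layer upload failed: %s", upResp.Status)
	}
	return nil
}

func main() {
	err := pushImage("819f04e5706f5", []byte(`{"parent": null}`), []byte("layer tarball bytes"))
	fmt.Println(err)
}
```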
Pull repository
---------------
This:
GET /v1/users/<userid>/<reponame>
Sends back a JSON dict mapping version tags to image hashes, e.g.:
{
"1.1": "87428fc522803d31065e7bce3cf03fe475096631e5e07bbd7a0fde60c4cf25c7",
"1.2": "0263829989b6fd954f72baaf2fc64bc2e2f01d692d4de72986ea808f6e99813f",
"latest": "0263829989b6fd954f72baaf2fc64bc2e2f01d692d4de72986ea808f6e99813f"
}
SUGGESTION: also allow this URL:
GET /v1/users/<userid>/<reponame>/<versiontag>
Which would send back the image version hash.
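For completeness, decoding that tag map on the client side is straightforward with `encoding/json` (placeholder registry URL again):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// registry is a placeholder base URL, not the real endpoint.
const registry = "https://registry.example.com"

// pullRepository fetches the tag -> image hash mapping described above.
func pullRepository(user, repo string) (map[string]string, error) {
	resp, err := http.Get(registry + "/v1/users/" + user + "/" + repo)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	tags := make(map[string]string)
	if err := json.NewDecoder(resp.Body).Decode(&tags); err != nil {
		return nil, err
	}
	return tags, nil
}

func main() {
	tags, err := pullRepository("jdoe", "pgsql")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(tags["latest"]) // image hash behind the "latest" tag
}
```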
Push repository
---------------
This:
PUT /v1/users/<userid>/<reponame>/<versiontag>
The request body should be the image version hash.
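And the matching push, which just PUTs the image hash as the request body; the user/repo/tag values in `main` are the ones from the example above, and the registry URL is still a placeholder.

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// registry is a placeholder base URL, not the real endpoint.
const registry = "https://registry.example.com"

// pushTag points <user>/<repo>@<tag> at an image hash, as described above.
func pushTag(user, repo, tag, imageHash string) error {
	url := registry + "/v1/users/" + user + "/" + repo + "/" + tag
	req, err := http.NewRequest("PUT", url, strings.NewReader(imageHash))
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode >= 400 {
		return fmt.Errorf("tag push failed: %s", resp.Status)
	}
	return nil
}

func main() {
	err := pushTag("jdoe", "pgsql", "lucid9.2",
		"0263829989b6fd954f72baaf2fc64bc2e2f01d692d4de72986ea808f6e99813f")
	fmt.Println(err)
}
```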
Example session
---------------
First idea:
# Automatically pull base, aka docker/base@latest, and run something in it
docker run base ...
(Output: 42424242)
docker commit 42424242 databeze
docker login jdoe s3s4me!
# The following two commands are equivalent
docker push jdoe/pgsql databeze
docker push jdoe/pgsql 42424242
Second idea:
docker run base ...
docker commit 42424242 jdoe/pgsql
docker login jdoe s3s4me!
docker push jdoe/pgsql
Maybe this would work too:
docker commit 42424242 pgsql
docker push pgsql
And maybe this too:
docker push -a
NOTE: when your commit overwrites an existing tag, the image should be marked "dirty" so that docker knows that it has to be pushed.
NOTE: if a pull would cause some local tag to be overwritten, docker could refuse, and ask you to rename your local tag, or ask you to specify a -f flag to overwrite. Your local changes won't be lost, but the tag will be lost, so if you don't know the image ID it could be hard to figure out which one it was.
NOTE: we probably need some commands to move/remove tags to images.
Collaborative workflow:
alice# docker login mybigco p455w0rd
bob# docker login mybigco p455w0rd
alice# docker pull base
alice# docker run -a -t -i base /bin/sh
... hard core authoring takes place ...
alice# docker commit <container_id> wwwbigco
alice# docker push wwwbigco
... the latter actually does docker push mybigco/wwwbigco@latest ...
bob# docker pull mybigco/wwwbigco
bob# docker run mybigco/wwwbigco /usr/sbin/nginx
... change things ...
bob# docker commit <container_id> wwwbigco
bob# docker push wwwbigco
NOTE: what about this?


@@ -1,74 +0,0 @@
package fake
import (
"archive/tar"
"bytes"
"github.com/kr/pty"
"io"
"math/rand"
"os/exec"
)
func FakeTar() (io.Reader, error) {
content := []byte("Hello world!\n")
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} {
hdr := new(tar.Header)
hdr.Size = int64(len(content))
hdr.Name = name
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
tw.Write([]byte(content))
}
tw.Close()
return buf, nil
}
func WriteFakeTar(dst io.Writer) error {
if data, err := FakeTar(); err != nil {
return err
} else if _, err := io.Copy(dst, data); err != nil {
return err
}
return nil
}
func RandomBytesChanged() uint {
return uint(rand.Int31n(24 * 1024 * 1024))
}
func RandomFilesChanged() uint {
return uint(rand.Int31n(42))
}
func RandomContainerSize() uint {
return uint(rand.Int31n(142 * 1024 * 1024))
}
func ContainerRunning() bool {
return false
}
func StartCommand(cmd *exec.Cmd, interactive bool) (io.WriteCloser, io.ReadCloser, error) {
if interactive {
term, err := pty.Start(cmd)
if err != nil {
return nil, nil, err
}
return term, term, nil
}
stdin, err := cmd.StdinPipe()
if err != nil {
return nil, nil, err
}
stdout, err := cmd.StdoutPipe()
if err != nil {
return nil, nil, err
}
if err := cmd.Start(); err != nil {
return nil, nil, err
}
return stdin, stdout, nil
}


@@ -1,113 +0,0 @@
package fs
import (
"errors"
"fmt"
"github.com/dotcloud/docker/future"
"io/ioutil"
"os"
"path"
"path/filepath"
)
type LayerStore struct {
Root string
}
func NewLayerStore(root string) (*LayerStore, error) {
abspath, err := filepath.Abs(root)
if err != nil {
return nil, err
}
// Create the root directory if it doesn't exist
if err := os.Mkdir(root, 0700); err != nil && !os.IsExist(err) {
return nil, err
}
return &LayerStore{
Root: abspath,
}, nil
}
func (store *LayerStore) List() []string {
files, err := ioutil.ReadDir(store.Root)
if err != nil {
return []string{}
}
var layers []string
for _, st := range files {
if st.IsDir() {
layers = append(layers, path.Join(store.Root, st.Name()))
}
}
return layers
}
func (store *LayerStore) Get(id string) string {
if !store.Exists(id) {
return ""
}
return store.layerPath(id)
}
func (store *LayerStore) rootExists() (bool, error) {
if stat, err := os.Stat(store.Root); err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
} else if !stat.IsDir() {
return false, errors.New("Not a directory: " + store.Root)
}
return true, nil
}
func (store *LayerStore) Init() error {
if exists, err := store.rootExists(); err != nil {
return err
} else if exists {
return nil
}
return os.Mkdir(store.Root, 0700)
}
func (store *LayerStore) Mktemp() (string, error) {
tmpName := future.RandomId()
tmpPath := path.Join(store.Root, "tmp-"+tmpName)
if err := os.Mkdir(tmpPath, 0700); err != nil {
return "", err
}
return tmpPath, nil
}
func (store *LayerStore) layerPath(id string) string {
return path.Join(store.Root, id)
}
func (store *LayerStore) AddLayer(id string, archive Archive) (string, error) {
if _, err := os.Stat(store.layerPath(id)); err == nil {
return "", fmt.Errorf("Layer already exists: %v", id)
}
tmp, err := store.Mktemp()
defer os.RemoveAll(tmp)
if err != nil {
return "", fmt.Errorf("Mktemp failed: %s", err)
}
if err := Untar(archive, tmp); err != nil {
return "", err
}
layer := store.layerPath(id)
if !store.Exists(id) {
if err := os.Rename(tmp, layer); err != nil {
return "", fmt.Errorf("Could not rename temp dir to layer %s: %s", layer, err)
}
}
return layer, nil
}
func (store *LayerStore) Exists(id string) bool {
st, err := os.Stat(store.layerPath(id))
if err != nil {
return false
}
return st.IsDir()
}


@@ -1,77 +0,0 @@
package fs
import (
"github.com/dotcloud/docker/fake"
"io/ioutil"
"os"
"testing"
)
func TestLayersInit(t *testing.T) {
store := tempStore(t)
defer os.RemoveAll(store.Root)
// Root should exist
if _, err := os.Stat(store.Root); err != nil {
t.Fatal(err)
}
// List() should be empty
if l := store.List(); len(l) != 0 {
t.Fatalf("List() should return %d, not %d", 0, len(l))
}
}
func TestAddLayer(t *testing.T) {
store := tempStore(t)
defer os.RemoveAll(store.Root)
layer, err := store.AddLayer("foo", testArchive(t))
if err != nil {
t.Fatal(err)
}
// Layer path should exist
if _, err := os.Stat(layer); err != nil {
t.Fatal(err)
}
// List() should return 1 layer
if l := store.List(); len(l) != 1 {
t.Fatalf("List() should return %d elements, not %d", 1, len(l))
}
// Get("foo") should return the correct layer
if foo := store.Get("foo"); foo != layer {
t.Fatalf("get(\"foo\") should return '%d', not '%d'", layer, foo)
}
}
func TestAddLayerDuplicate(t *testing.T) {
store := tempStore(t)
defer os.RemoveAll(store.Root)
if _, err := store.AddLayer("foobar123", testArchive(t)); err != nil {
t.Fatal(err)
}
if _, err := store.AddLayer("foobar123", testArchive(t)); err == nil {
t.Fatalf("Creating duplicate layer should fail")
}
}
/*
* HELPER FUNCTIONS
*/
func tempStore(t *testing.T) *LayerStore {
tmp, err := ioutil.TempDir("", "docker-fs-layerstore-")
if err != nil {
t.Fatal(err)
}
store, err := NewLayerStore(tmp)
if err != nil {
t.Fatal(err)
}
return store
}
func testArchive(t *testing.T) Archive {
archive, err := fake.FakeTar()
if err != nil {
t.Fatal(err)
}
return archive
}


@@ -1,7 +0,0 @@
package fs
import "errors"
func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
return errors.New("mount is not implemented on darwin")
}


@@ -1,7 +0,0 @@
package fs
import "syscall"
func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
return syscall.Mount(source, target, fstype, flags, data)
}


@@ -1,223 +0,0 @@
package fs
import (
"fmt"
"github.com/dotcloud/docker/fake"
"testing"
)
func countImages(store *Store) int {
paths, err := store.Images()
if err != nil {
panic(err)
}
return len(paths)
}
func TestRemoveInPath(t *testing.T) {
store, err := TempStore("test-remove-in-path")
if err != nil {
t.Fatal(err)
}
defer nuke(store)
archive, err := fake.FakeTar()
if err != nil {
t.Fatal(err)
}
if c := countImages(store); c != 0 {
t.Fatalf("Expected 0 images, %d found", c)
}
// Test 10 create / Delete all
for i := 0; i < 10; i++ {
if _, err := store.Create(archive, nil, "foo", "Testing"); err != nil {
t.Fatal(err)
}
}
if c := countImages(store); c != 10 {
t.Fatalf("Expected 10 images, %d found", c)
}
if err := store.RemoveInPath("foo"); err != nil {
t.Fatal(err)
}
if c := countImages(store); c != 0 {
t.Fatalf("Expected 0 images, %d found", c)
}
// Test 10 create / Delete 1
for i := 0; i < 10; i++ {
if _, err := store.Create(archive, nil, fmt.Sprintf("foo-%d", i), "Testing"); err != nil {
t.Fatal(err)
}
}
if c := countImages(store); c != 10 {
t.Fatalf("Expected 10 images, %d found", c)
}
if err := store.RemoveInPath("foo-0"); err != nil {
t.Fatal(err)
}
if c := countImages(store); c != 9 {
t.Fatalf("Expected 9 images, %d found", c)
}
// Delete failure
if err := store.RemoveInPath("Not_Foo"); err != nil {
t.Fatal(err)
}
if c := countImages(store); c != 9 {
t.Fatalf("Expected 9 images, %d found", c)
}
}
func TestRemove(t *testing.T) {
store, err := TempStore("test-remove")
if err != nil {
t.Fatal(err)
}
defer nuke(store)
archive, err := fake.FakeTar()
if err != nil {
t.Fatal(err)
}
if c := countImages(store); c != 0 {
t.Fatalf("Expected 0 images, %d found", c)
}
// Test 1 create / 1 delete
img, err := store.Create(archive, nil, "foo", "Testing")
if err != nil {
t.Fatal(err)
}
if c := countImages(store); c != 1 {
t.Fatalf("Expected 1 images, %d found", c)
}
if err := store.Remove(img); err != nil {
t.Fatal(err)
}
if c := countImages(store); c != 0 {
t.Fatalf("Expected 0 images, %d found", c)
}
// Test 2 create (same name) / 1 delete
img1, err := store.Create(archive, nil, "foo", "Testing")
if err != nil {
t.Fatal(err)
}
img2, err := store.Create(archive, nil, "foo", "Testing")
if err != nil {
t.Fatal(err)
}
if c := countImages(store); c != 2 {
t.Fatalf("Expected 2 images, %d found", c)
}
if err := store.Remove(img1); err != nil {
t.Fatal(err)
}
if c := countImages(store); c != 1 {
t.Fatalf("Expected 1 images, %d found", c)
}
// Test delete wrong name
// Note: If we change orm and Delete of non existing return error, we will need to change this test
if err := store.Remove(&Image{Id: "Not_foo", store: img2.store}); err != nil {
t.Fatal(err)
}
if c := countImages(store); c != 1 {
t.Fatalf("Expected 1 images, %d found", c)
}
// Test delete last one
if err := store.Remove(img2); err != nil {
t.Fatal(err)
}
if c := countImages(store); c != 0 {
t.Fatalf("Expected 0 images, %d found", c)
}
}
func TestRemoveRegexp(t *testing.T) {
store, err := TempStore("test-remove-regexp")
if err != nil {
t.Fatal(err)
}
defer nuke(store)
archive, err := fake.FakeTar()
if err != nil {
t.Fatal(err)
}
if c := countImages(store); c != 0 {
t.Fatalf("Expected 0 images, %d found", c)
}
// Test 10 create with different names / Delete all good regexp
for i := 0; i < 10; i++ {
if _, err := store.Create(archive, nil, fmt.Sprintf("foo-%d", i), "Testing"); err != nil {
t.Fatal(err)
}
}
if c := countImages(store); c != 10 {
t.Fatalf("Expected 10 images, %d found", c)
}
if err := store.RemoveRegexp("foo"); err != nil {
t.Fatal(err)
}
if c := countImages(store); c != 0 {
t.Fatalf("Expected 0 images, %d found", c)
}
// Test 10 create with different names / Delete all good regexp globing
for i := 0; i < 10; i++ {
if _, err := store.Create(archive, nil, fmt.Sprintf("foo-%d", i), "Testing"); err != nil {
t.Fatal(err)
}
}
if c := countImages(store); c != 10 {
t.Fatalf("Expected 10 images, %d found", c)
}
if err := store.RemoveRegexp("foo-*"); err != nil {
t.Fatal(err)
}
if c := countImages(store); c != 0 {
t.Fatalf("Expected 0 images, %d found", c)
}
// Test 10 create with different names / Delete all bad regexp
for i := 0; i < 10; i++ {
if _, err := store.Create(archive, nil, fmt.Sprintf("foo-%d", i), "Testing"); err != nil {
t.Fatal(err)
}
}
if c := countImages(store); c != 10 {
t.Fatalf("Expected 10 images, %d found", c)
}
if err := store.RemoveRegexp("oo-*"); err != nil {
t.Fatal(err)
}
if c := countImages(store); c != 0 {
t.Fatalf("Expected 0 images, %d found", c)
}
// Test 10 create with different names / Delete none strict regexp
for i := 0; i < 10; i++ {
if _, err := store.Create(archive, nil, fmt.Sprintf("foo-%d", i), "Testing"); err != nil {
t.Fatal(err)
}
}
if c := countImages(store); c != 10 {
t.Fatalf("Expected 10 images, %d found", c)
}
if err := store.RemoveRegexp("^oo-"); err != nil {
t.Fatal(err)
}
if c := countImages(store); c != 10 {
t.Fatalf("Expected 10 images, %d found", c)
}
// Test delete 2
if err := store.RemoveRegexp("^foo-[1,2]$"); err != nil {
t.Fatal(err)
}
if c := countImages(store); c != 8 {
t.Fatalf("Expected 8 images, %d found", c)
}
}


@@ -1,521 +0,0 @@
package fs
import (
"database/sql"
"fmt"
"github.com/dotcloud/docker/future"
_ "github.com/mattn/go-sqlite3"
"github.com/shykes/gorp" //Forked to implement CreateTablesOpts
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"syscall"
"time"
)
type Store struct {
Root string
db *sql.DB
orm *gorp.DbMap
layers *LayerStore
}
type Archive io.Reader
func New(root string) (*Store, error) {
isNewStore := true
if err := os.Mkdir(root, 0700); err != nil && !os.IsExist(err) {
return nil, err
}
db, err := sql.Open("sqlite3", path.Join(root, "db"))
if err != nil {
return nil, err
}
orm := &gorp.DbMap{Db: db, Dialect: gorp.SqliteDialect{}}
orm.AddTableWithName(Image{}, "images").SetKeys(false, "Id")
orm.AddTableWithName(Path{}, "paths").SetKeys(false, "Path", "Image")
orm.AddTableWithName(Mountpoint{}, "mountpoints").SetKeys(false, "Root")
orm.AddTableWithName(Tag{}, "tags").SetKeys(false, "TagName")
if isNewStore {
if err := orm.CreateTablesOpts(true); err != nil {
return nil, err
}
}
layers, err := NewLayerStore(path.Join(root, "layers"))
if err != nil {
return nil, err
}
return &Store{
Root: root,
db: db,
orm: orm,
layers: layers,
}, nil
}
func (store *Store) imageList(src []interface{}) []*Image {
var images []*Image
for _, i := range src {
img := i.(*Image)
img.store = store
images = append(images, img)
}
return images
}
func (store *Store) Images() ([]*Image, error) {
images, err := store.orm.Select(Image{}, "select * from images")
if err != nil {
return nil, err
}
return store.imageList(images), nil
}
func (store *Store) Paths() ([]string, error) {
var paths []string
rows, err := store.db.Query("select distinct Path from paths order by Path")
if err != nil {
return nil, err
}
for rows.Next() {
var path string
if err := rows.Scan(&path); err != nil {
return nil, err
}
paths = append(paths, path)
}
return paths, nil
}
func (store *Store) RemoveInPath(pth string) error {
images, err := store.List(pth)
if err != nil {
return err
}
for _, img := range images {
if err = store.Remove(img); err != nil {
return err
}
}
return nil
}
// RemoveRegexp deletes all images whose name matches `pattern`
func (store *Store) RemoveRegexp(pattern string) error {
// Retrieve all the paths
paths, err := store.Paths()
if err != nil {
return err
}
// Check the pattern on each elements
for _, pth := range paths {
if match, err := regexp.MatchString(pattern, pth); err != nil {
return err
} else if match {
// If there is a match, remove it
if err := store.RemoveInPath(pth); err != nil {
return err
}
}
}
return nil
}
func (store *Store) Remove(img *Image) error {
_, err := store.orm.Delete(img)
return err
}
func (store *Store) List(pth string) ([]*Image, error) {
pth = path.Clean(pth)
images, err := store.orm.Select(Image{}, "select images.* from images, paths where Path=? and paths.Image=images.Id order by images.Created desc", pth)
if err != nil {
return nil, err
}
return store.imageList(images), nil
}
func (store *Store) Find(pth string) (*Image, error) {
pth = path.Clean(pth)
img, err := store.Get(pth)
if err != nil {
return nil, err
} else if img != nil {
return img, nil
}
var q string
var args []interface{}
// FIXME: this breaks if the path contains a ':'
// If format is path:rev
if parts := strings.SplitN(pth, ":", 2); len(parts) == 2 {
q = "select Images.* from images, paths where Path=? and images.Id=? and paths.Image=images.Id"
args = []interface{}{parts[0], parts[1]}
// If format is just a path
} else {
q = "select images.* from images, paths where Path=? and paths.Image=images.Id order by images.Created desc limit 1"
args = []interface{}{parts[0]}
}
images, err := store.orm.Select(Image{}, q, args...)
if err != nil {
return nil, err
} else if len(images) < 1 {
return nil, nil
}
img = images[0].(*Image)
img.store = store
return img, nil
}
func (store *Store) Get(id string) (*Image, error) {
img, err := store.orm.Get(Image{}, id)
if img == nil {
return nil, err
}
res := img.(*Image)
res.store = store
return res, err
}
func (store *Store) Create(layerData Archive, parent *Image, pth, comment string) (*Image, error) {
// FIXME: actually do something with the layer...
img := &Image{
Id: future.RandomId(),
Comment: comment,
Created: time.Now().Unix(),
store: store,
}
if parent != nil {
img.Parent = parent.Id
}
// FIXME: Archive should contain compression info. For now we only support uncompressed.
err := store.Register(layerData, img, pth)
return img, err
}
func (store *Store) Register(layerData Archive, img *Image, pth string) error {
img.store = store
_, err := store.layers.AddLayer(img.Id, layerData)
if err != nil {
return fmt.Errorf("Could not add layer: %s", err)
}
pathObj := &Path{
Path: path.Clean(pth),
Image: img.Id,
}
trans, err := store.orm.Begin()
if err != nil {
return fmt.Errorf("Could not begin transaction: %s", err)
}
if err := trans.Insert(img); err != nil {
return fmt.Errorf("Could not insert image info: %s", err)
}
if err := trans.Insert(pathObj); err != nil {
return fmt.Errorf("Could not insert path info: %s", err)
}
if err := trans.Commit(); err != nil {
return fmt.Errorf("Could not commit transaction: %s", err)
}
return nil
}
func (store *Store) Layers() []string {
return store.layers.List()
}
type Image struct {
Id string
Parent string
Comment string
Created int64
store *Store `db:"-"`
}
func (image *Image) Copy(pth string) (*Image, error) {
if err := image.store.orm.Insert(&Path{Path: pth, Image: image.Id}); err != nil {
return nil, err
}
return image, nil
}
type Mountpoint struct {
Image string
Root string
Rw string
Store *Store `db:"-"`
}
func (image *Image) Mountpoint(root, rw string) (*Mountpoint, error) {
mountpoint := &Mountpoint{
Root: path.Clean(root),
Rw: path.Clean(rw),
Image: image.Id,
Store: image.store,
}
if err := image.store.orm.Insert(mountpoint); err != nil {
return nil, err
}
return mountpoint, nil
}
func (image *Image) layers() ([]string, error) {
var list []string
var err error
currentImg := image
for currentImg != nil {
if layer := image.store.layers.Get(currentImg.Id); layer != "" {
list = append(list, layer)
} else {
return list, fmt.Errorf("Layer not found for image %s", image.Id)
}
currentImg, err = currentImg.store.Get(currentImg.Parent)
if err != nil {
return list, fmt.Errorf("Error while getting parent image: %v", err)
}
}
if len(list) == 0 {
return nil, fmt.Errorf("No layer found for image %s\n", image.Id)
}
return list, nil
}
func (image *Image) Mountpoints() ([]*Mountpoint, error) {
var mountpoints []*Mountpoint
res, err := image.store.orm.Select(Mountpoint{}, "select * from mountpoints where Image=?", image.Id)
if err != nil {
return nil, err
}
for _, mp := range res {
mountpoints = append(mountpoints, mp.(*Mountpoint))
}
return mountpoints, nil
}
func (image *Image) Mount(root, rw string) (*Mountpoint, error) {
var mountpoint *Mountpoint
if mp, err := image.store.FetchMountpoint(root, rw); err != nil {
return nil, err
} else if mp == nil {
mountpoint, err = image.Mountpoint(root, rw)
if err != nil {
return nil, fmt.Errorf("Could not create mountpoint: %s", err)
} else if mountpoint == nil {
return nil, fmt.Errorf("No mountpoint created")
}
} else {
mountpoint = mp
}
if err := mountpoint.createFolders(); err != nil {
return nil, err
}
// FIXME: Now mount the layers
rwBranch := fmt.Sprintf("%v=rw", mountpoint.Rw)
roBranches := ""
layers, err := image.layers()
if err != nil {
return nil, err
}
for _, layer := range layers {
roBranches += fmt.Sprintf("%v=ro:", layer)
}
branches := fmt.Sprintf("br:%v:%v", rwBranch, roBranches)
if err := mount("none", mountpoint.Root, "aufs", 0, branches); err != nil {
return mountpoint, err
}
if !mountpoint.Mounted() {
return mountpoint, fmt.Errorf("Mount failed")
}
// FIXME: Create tests for deletion
// FIXME: move this part to change.go, maybe refactor
// fs.Change() to avoid the fake mountpoint
// Retrieve the changeset from the parent and apply it to the container
// - Retrieve the changes
changes, err := image.store.Changes(&Mountpoint{
Image: image.Id,
Root: layers[0],
Rw: layers[0],
Store: image.store})
if err != nil {
return nil, err
}
// Iterate on changes
for _, c := range changes {
// If there is a delete
if c.Kind == ChangeDelete {
// Make sure the directory exists
file_path, file_name := path.Dir(c.Path), path.Base(c.Path)
if err := os.MkdirAll(path.Join(mountpoint.Rw, file_path), 0755); err != nil {
return nil, err
}
// And create the whiteout (we just need to create empty file, discard the return)
if _, err := os.Create(path.Join(path.Join(mountpoint.Rw, file_path),
".wh."+path.Base(file_name))); err != nil {
return nil, err
}
}
}
return mountpoint, nil
}
func (mp *Mountpoint) EnsureMounted() error {
if mp.Mounted() {
return nil
}
img, err := mp.Store.Get(mp.Image)
if err != nil {
return err
}
_, err = img.Mount(mp.Root, mp.Rw)
return err
}
func (mp *Mountpoint) createFolders() error {
if err := os.Mkdir(mp.Root, 0755); err != nil && !os.IsExist(err) {
return err
}
if err := os.Mkdir(mp.Rw, 0755); err != nil && !os.IsExist(err) {
return err
}
return nil
}
func (mp *Mountpoint) Mounted() bool {
root, err := os.Stat(mp.Root)
if err != nil {
if os.IsNotExist(err) {
return false
}
panic(err)
}
parent, err := os.Stat(filepath.Join(mp.Root, ".."))
if err != nil {
panic(err)
}
rootSt := root.Sys().(*syscall.Stat_t)
parentSt := parent.Sys().(*syscall.Stat_t)
return rootSt.Dev != parentSt.Dev
}
func (mp *Mountpoint) Umount() error {
if !mp.Mounted() {
return fmt.Errorf("Mountpoint doesn't seem to be mounted")
}
if err := syscall.Unmount(mp.Root, 0); err != nil {
return fmt.Errorf("Unmount syscall failed: %v", err)
}
if mp.Mounted() {
return fmt.Errorf("Umount: Filesystem still mounted after calling umount(%v)", mp.Root)
}
// Even though we just unmounted the filesystem, AUFS will prevent deleting the mntpoint
// for some time. We'll just keep retrying until it succeeds.
for retries := 0; retries < 1000; retries++ {
err := os.Remove(mp.Root)
if err == nil {
// rm mntpoint succeeded
return nil
}
if os.IsNotExist(err) {
// mntpoint doesn't exist anymore. Success.
return nil
}
// fmt.Printf("(%v) Remove %v returned: %v\n", retries, mp.Root, err)
time.Sleep(10 * time.Millisecond)
}
return fmt.Errorf("Umount: Failed to umount %v", mp.Root)
}
func (mp *Mountpoint) Deregister() error {
if mp.Mounted() {
return fmt.Errorf("Mountpoint is currently mounted, can't deregister")
}
_, err := mp.Store.orm.Delete(mp)
return err
}
func (store *Store) FetchMountpoint(root, rw string) (*Mountpoint, error) {
res, err := store.orm.Select(Mountpoint{}, "select * from mountpoints where Root=? and Rw=?", root, rw)
if err != nil {
return nil, err
} else if len(res) < 1 || res[0] == nil {
return nil, nil
}
mp := res[0].(*Mountpoint)
mp.Store = store
return mp, nil
}
// OpenFile opens the named file for reading.
func (mp *Mountpoint) OpenFile(path string, flag int, perm os.FileMode) (*os.File, error) {
if err := mp.EnsureMounted(); err != nil {
return nil, err
}
return os.OpenFile(filepath.Join(mp.Root, path), flag, perm)
}
// ReadDir reads the directory named by dirname, relative to the Mountpoint's root,
// and returns a list of sorted directory entries
func (mp *Mountpoint) ReadDir(dirname string) ([]os.FileInfo, error) {
if err := mp.EnsureMounted(); err != nil {
return nil, err
}
return ioutil.ReadDir(filepath.Join(mp.Root, dirname))
}
func (store *Store) AddTag(imageId, tagName string) error {
if image, err := store.Get(imageId); err != nil {
return err
} else if image == nil {
return fmt.Errorf("No image with ID %s", imageId)
}
err2 := store.orm.Insert(&Tag{
TagName: tagName,
Image: imageId,
})
return err2
}
func (store *Store) GetByTag(tagName string) (*Image, error) {
res, err := store.orm.Get(Tag{}, tagName)
if err != nil {
return nil, err
} else if res == nil {
return nil, fmt.Errorf("No image associated to tag \"%s\"", tagName)
}
tag := res.(*Tag)
img, err2 := store.Get(tag.Image)
if err2 != nil {
return nil, err2
} else if img == nil {
return nil, fmt.Errorf("Tag was found but image seems to be inexistent.")
}
return img, nil
}
type Path struct {
Path string
Image string
}
type Tag struct {
TagName string
Image string
}


@@ -1,280 +0,0 @@
package fs
import (
"fmt"
"github.com/dotcloud/docker/fake"
"github.com/dotcloud/docker/future"
"io/ioutil"
"os"
"testing"
"time"
)
// FIXME: Remove the Fake package
func TestInit(t *testing.T) {
store, err := TempStore("testinit")
if err != nil {
t.Fatal(err)
}
defer nuke(store)
paths, err := store.Paths()
if err != nil {
t.Fatal(err)
}
if l := len(paths); l != 0 {
t.Fatal("Fresh store should be empty after init (len=%d)", l)
}
}
// FIXME: Do more extensive tests (ex: create multiple, delete, recreate;
// create multiple, check the amount of images and paths, etc..)
func TestCreate(t *testing.T) {
store, err := TempStore("testcreate")
if err != nil {
t.Fatal(err)
}
defer nuke(store)
archive, err := fake.FakeTar()
if err != nil {
t.Fatal(err)
}
image, err := store.Create(archive, nil, "foo", "Testing")
if err != nil {
t.Fatal(err)
}
if images, err := store.Images(); err != nil {
t.Fatal(err)
} else if l := len(images); l != 1 {
t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l)
}
if images, err := store.List("foo"); err != nil {
t.Fatal(err)
} else if l := len(images); l != 1 {
t.Fatalf("Path foo has wrong number of images (should be %d, not %d)", 1, l)
} else if images[0].Id != image.Id {
t.Fatalf("Imported image should be listed at path foo (%s != %s)", images[0], image)
}
}
func TestRegister(t *testing.T) {
store, err := TempStore("testregister")
if err != nil {
t.Fatal(err)
}
defer nuke(store)
archive, err := fake.FakeTar()
if err != nil {
t.Fatal(err)
}
image := &Image{
Id: future.RandomId(),
Comment: "testing",
Created: time.Now().Unix(),
store: store,
}
err = store.Register(archive, image, "foo")
if err != nil {
t.Fatal(err)
}
if images, err := store.Images(); err != nil {
t.Fatal(err)
} else if l := len(images); l != 1 {
t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l)
}
if images, err := store.List("foo"); err != nil {
t.Fatal(err)
} else if l := len(images); l != 1 {
t.Fatalf("Path foo has wrong number of images (should be %d, not %d)", 1, l)
} else if images[0].Id != image.Id {
t.Fatalf("Imported image should be listed at path foo (%s != %s)", images[0], image)
}
}
func TestTag(t *testing.T) {
store, err := TempStore("testtag")
if err != nil {
t.Fatal(err)
}
defer nuke(store)
archive, err := fake.FakeTar()
if err != nil {
t.Fatal(err)
}
image, err := store.Create(archive, nil, "foo", "Testing")
if err != nil {
t.Fatal(err)
}
if images, err := store.Images(); err != nil {
t.Fatal(err)
} else if l := len(images); l != 1 {
t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l)
}
if err := store.AddTag(image.Id, "baz"); err != nil {
t.Fatalf("Error while adding a tag to created image: %s", err)
}
if taggedImage, err := store.GetByTag("baz"); err != nil {
t.Fatalf("Error while trying to retrieve image for tag 'baz': %s", err)
} else if taggedImage.Id != image.Id {
t.Fatalf("Expected to retrieve image %s but found %s instead", image.Id, taggedImage.Id)
}
}
// Copy an image to a new path
func TestCopyNewPath(t *testing.T) {
store, err := TempStore("testcopynewpath")
if err != nil {
t.Fatal(err)
}
defer nuke(store)
archive, err := fake.FakeTar()
if err != nil {
t.Fatal(err)
}
src, err := store.Create(archive, nil, "foo", "Testing")
if err != nil {
t.Fatal(err)
}
dst, err := src.Copy("bar")
if err != nil {
t.Fatal(err)
}
// ID should be the same
if src.Id != dst.Id {
t.Fatal("Different IDs")
}
// Check number of images at source path
if images, err := store.List("foo"); err != nil {
t.Fatal(err)
} else if l := len(images); l != 1 {
t.Fatal("Wrong number of images at source path (should be %d, not %d)", 1, l)
}
// Check number of images at destination path
if images, err := store.List("bar"); err != nil {
t.Fatal(err)
} else if l := len(images); l != 1 {
t.Fatal("Wrong number of images at destination path (should be %d, not %d)", 1, l)
}
if err := healthCheck(store); err != nil {
t.Fatal(err)
}
}
// Copying an image to the same path twice should fail
func TestCopySameName(t *testing.T) {
store, err := TempStore("testcopysamename")
if err != nil {
t.Fatal(err)
}
defer nuke(store)
archive, err := fake.FakeTar()
if err != nil {
t.Fatal(err)
}
src, err := store.Create(archive, nil, "foo", "Testing")
if err != nil {
t.Fatal(err)
}
_, err = src.Copy("foo")
if err == nil {
t.Fatal("Copying an image to the same patch twice should fail.")
}
}
func TestMountPoint(t *testing.T) {
store, err := TempStore("test-mountpoint")
if err != nil {
t.Fatal(err)
}
defer nuke(store)
archive, err := fake.FakeTar()
if err != nil {
t.Fatal(err)
}
image, err := store.Create(archive, nil, "foo", "Testing")
if err != nil {
t.Fatal(err)
}
mountpoint, err := image.Mountpoint("/tmp/a", "/tmp/b")
if err != nil {
t.Fatal(err)
}
if mountpoint.Root != "/tmp/a" {
t.Fatal("Wrong mountpoint root (should be %s, not %s)", "/tmp/a", mountpoint.Root)
}
if mountpoint.Rw != "/tmp/b" {
t.Fatal("Wrong mountpoint root (should be %s, not %s)", "/tmp/b", mountpoint.Rw)
}
}
func TestMountpointDuplicateRoot(t *testing.T) {
store, err := TempStore("test-mountpoint")
if err != nil {
t.Fatal(err)
}
defer nuke(store)
archive, err := fake.FakeTar()
if err != nil {
t.Fatal(err)
}
image, err := store.Create(archive, nil, "foo", "Testing")
if err != nil {
t.Fatal(err)
}
_, err = image.Mountpoint("/tmp/a", "/tmp/b")
if err != nil {
t.Fatal(err)
}
if _, err = image.Mountpoint("/tmp/a", "/tmp/foobar"); err == nil {
t.Fatal("Duplicate mountpoint root should fail")
}
}
func TempStore(prefix string) (*Store, error) {
dir, err := ioutil.TempDir("", "docker-fs-test-"+prefix)
if err != nil {
return nil, err
}
return New(dir)
}
func nuke(store *Store) error {
return os.RemoveAll(store.Root)
}
// Look for inconsistencies in a store.
func healthCheck(store *Store) error {
parents := make(map[string]bool)
allIDs := make(map[string]bool) // All image IDs, across every path
paths, err := store.Paths()
if err != nil {
return err
}
for _, path := range paths {
images, err := store.List(path)
if err != nil {
return err
}
IDs := make(map[string]bool) // All IDs for this path
for _, img := range images {
// Check for duplicate IDs per path
if _, exists := IDs[img.Id]; exists {
return fmt.Errorf("Duplicate ID: %s", img.Id)
} else {
IDs[img.Id] = true
allIDs[img.Id] = true
}
// Store parent for 2nd pass
if parent := img.Parent; parent != "" {
parents[parent] = true
}
}
}
// Check non-existing parents
for parent := range parents {
if _, exists := allIDs[parent]; !exists {
return fmt.Errorf("Reference to non-registered parent: %s", parent)
}
}
return nil
}


@@ -1,136 +0,0 @@
package future
import (
"bytes"
"crypto/sha256"
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"time"
)
func Seed() {
rand.Seed(time.Now().UTC().UnixNano())
}
func ComputeId(content io.Reader) (string, error) {
h := sha256.New()
if _, err := io.Copy(h, content); err != nil {
return "", err
}
return fmt.Sprintf("%x", h.Sum(nil)[:8]), nil
}
func HumanDuration(d time.Duration) string {
if seconds := int(d.Seconds()); seconds < 1 {
return "Less than a second"
} else if seconds < 60 {
return fmt.Sprintf("%d seconds", seconds)
} else if minutes := int(d.Minutes()); minutes == 1 {
return "About a minute"
} else if minutes < 60 {
return fmt.Sprintf("%d minutes", minutes)
} else if hours := int(d.Hours()); hours == 1 {
return "About an hour"
} else if hours < 48 {
return fmt.Sprintf("%d hours", hours)
} else if hours < 24*7*2 {
return fmt.Sprintf("%d days", hours/24)
} else if hours < 24*30*3 {
return fmt.Sprintf("%d weeks", hours/24/7)
} else if hours < 24*365*2 {
return fmt.Sprintf("%d months", hours/24/30)
}
return fmt.Sprintf("%d years", d.Hours()/24/365)
}
func randomBytes() io.Reader {
return bytes.NewBuffer([]byte(fmt.Sprintf("%x", rand.Int())))
}
func RandomId() string {
id, _ := ComputeId(randomBytes()) // can't fail
return id
}
func Go(f func() error) chan error {
ch := make(chan error)
go func() {
ch <- f()
}()
return ch
}
// Pv wraps an io.Reader such that it is passed through unchanged,
// but logs the number of bytes copied (comparable to the unix command pv)
func Pv(src io.Reader, info io.Writer) io.Reader {
var totalBytes int
data := make([]byte, 2048)
r, w := io.Pipe()
go func() {
for {
if n, err := src.Read(data); err != nil {
w.CloseWithError(err)
return
} else {
totalBytes += n
fmt.Fprintf(info, "--> %d bytes\n", totalBytes)
if _, err = w.Write(data[:n]); err != nil {
return
}
}
}
}()
return r
}
// Request a given URL and return an io.Reader
func Download(url string, stderr io.Writer) (*http.Response, error) {
var resp *http.Response
var err error = nil
if resp, err = http.Get(url); err != nil {
return nil, err
}
if resp.StatusCode >= 400 {
return nil, errors.New("Got HTTP status code >= 400: " + resp.Status)
}
return resp, nil
}
// Reader with progress bar
type progressReader struct {
reader io.ReadCloser // Stream to read from
output io.Writer // Where to send progress bar to
read_total int // Expected stream length (bytes)
read_progress int // How much has been read so far (bytes)
last_update int // How many bytes had been read at the last update
}
func (r *progressReader) Read(p []byte) (n int, err error) {
read, err := io.ReadCloser(r.reader).Read(p)
r.read_progress += read
// Only update progress for every 1% read
update_every := int(0.01 * float64(r.read_total))
if r.read_progress-r.last_update > update_every || r.read_progress == r.read_total {
fmt.Fprintf(r.output, "%d/%d (%.0f%%)\r",
r.read_progress,
r.read_total,
float64(r.read_progress)/float64(r.read_total)*100)
r.last_update = r.read_progress
}
// Send newline when complete
if err == io.EOF {
fmt.Fprintf(r.output, "\n")
}
return read, err
}
func (r *progressReader) Close() error {
return io.ReadCloser(r.reader).Close()
}
func ProgressReader(r io.ReadCloser, size int, output io.Writer) *progressReader {
return &progressReader{r, output, size, 0, 0}
}

graph.go

@@ -0,0 +1,201 @@
package docker
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"time"
)
type Graph struct {
Root string
}
func NewGraph(root string) (*Graph, error) {
abspath, err := filepath.Abs(root)
if err != nil {
return nil, err
}
// Create the root directory if it doesn't exist
if err := os.Mkdir(root, 0700); err != nil && !os.IsExist(err) {
return nil, err
}
return &Graph{
Root: abspath,
}, nil
}
func (graph *Graph) Exists(id string) bool {
if _, err := graph.Get(id); err != nil {
return false
}
return true
}
func (graph *Graph) Get(id string) (*Image, error) {
// FIXME: return nil when the image doesn't exist, instead of an error
img, err := LoadImage(graph.imageRoot(id))
if err != nil {
return nil, err
}
if img.Id != id {
return nil, fmt.Errorf("Image stored at '%s' has wrong id '%s'", id, img.Id)
}
img.graph = graph
return img, nil
}
func (graph *Graph) Create(layerData Archive, container *Container, comment string) (*Image, error) {
img := &Image{
Id: GenerateId(),
Comment: comment,
Created: time.Now(),
}
if container != nil {
img.Parent = container.Image
img.Container = container.Id
img.ContainerConfig = *container.Config
}
if err := graph.Register(layerData, img); err != nil {
return nil, err
}
return img, nil
}
func (graph *Graph) Register(layerData Archive, img *Image) error {
if err := ValidateId(img.Id); err != nil {
return err
}
// (This is a convenience to save time. Race conditions are taken care of by os.Rename)
if graph.Exists(img.Id) {
return fmt.Errorf("Image %s already exists", img.Id)
}
tmp, err := graph.Mktemp(img.Id)
defer os.RemoveAll(tmp)
if err != nil {
return fmt.Errorf("Mktemp failed: %s", err)
}
if err := StoreImage(img, layerData, tmp); err != nil {
return err
}
// Commit
if err := os.Rename(tmp, graph.imageRoot(img.Id)); err != nil {
return err
}
img.graph = graph
return nil
}
func (graph *Graph) Mktemp(id string) (string, error) {
tmp, err := NewGraph(path.Join(graph.Root, ":tmp:"))
if err != nil {
return "", fmt.Errorf("Couldn't create temp: %s", err)
}
if tmp.Exists(id) {
return "", fmt.Errorf("Image %d already exists", id)
}
return tmp.imageRoot(id), nil
}
func (graph *Graph) Garbage() (*Graph, error) {
return NewGraph(path.Join(graph.Root, ":garbage:"))
}
func (graph *Graph) Delete(id string) error {
garbage, err := graph.Garbage()
if err != nil {
return err
}
return os.Rename(graph.imageRoot(id), garbage.imageRoot(id))
}
func (graph *Graph) Undelete(id string) error {
garbage, err := graph.Garbage()
if err != nil {
return err
}
return os.Rename(garbage.imageRoot(id), graph.imageRoot(id))
}
func (graph *Graph) GarbageCollect() error {
garbage, err := graph.Garbage()
if err != nil {
return err
}
return os.RemoveAll(garbage.Root)
}
func (graph *Graph) Map() (map[string]*Image, error) {
// FIXME: this should replace All()
all, err := graph.All()
if err != nil {
return nil, err
}
images := make(map[string]*Image, len(all))
for _, image := range all {
images[image.Id] = image
}
return images, nil
}
func (graph *Graph) All() ([]*Image, error) {
var images []*Image
err := graph.WalkAll(func(image *Image) {
images = append(images, image)
})
return images, err
}
func (graph *Graph) WalkAll(handler func(*Image)) error {
files, err := ioutil.ReadDir(graph.Root)
if err != nil {
return err
}
for _, st := range files {
if img, err := graph.Get(st.Name()); err != nil {
// Skip image
continue
} else if handler != nil {
handler(img)
}
}
return nil
}
func (graph *Graph) ByParent() (map[string][]*Image, error) {
byParent := make(map[string][]*Image)
err := graph.WalkAll(func(image *Image) {
// Skip images whose parent is not in the graph (including root images)
if _, err := graph.Get(image.Parent); err != nil {
return
}
if children, exists := byParent[image.Parent]; exists {
byParent[image.Parent] = append(children, image)
} else {
byParent[image.Parent] = []*Image{image}
}
})
return byParent, err
}
func (graph *Graph) Heads() (map[string]*Image, error) {
heads := make(map[string]*Image)
byParent, err := graph.ByParent()
if err != nil {
return nil, err
}
err = graph.WalkAll(func(image *Image) {
// If it's not in the byParent lookup table, then
// it's not a parent -> so it's a head!
if _, exists := byParent[image.Id]; !exists {
heads[image.Id] = image
}
})
return heads, err
}
func (graph *Graph) imageRoot(id string) string {
return path.Join(graph.Root, id)
}

graph_test.go

@@ -0,0 +1,210 @@
package docker
import (
"archive/tar"
"bytes"
"io"
"io/ioutil"
"os"
"path"
"testing"
"time"
)
func TestInit(t *testing.T) {
graph := tempGraph(t)
defer os.RemoveAll(graph.Root)
// Root should exist
if _, err := os.Stat(graph.Root); err != nil {
t.Fatal(err)
}
// All() should be empty
if l, err := graph.All(); err != nil {
t.Fatal(err)
} else if len(l) != 0 {
t.Fatalf("List() should return %d, not %d", 0, len(l))
}
}
// FIXME: Do more extensive tests (ex: create multiple, delete, recreate;
// create multiple, check the amount of images and paths, etc..)
func TestGraphCreate(t *testing.T) {
graph := tempGraph(t)
defer os.RemoveAll(graph.Root)
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
image, err := graph.Create(archive, nil, "Testing")
if err != nil {
t.Fatal(err)
}
if err := ValidateId(image.Id); err != nil {
t.Fatal(err)
}
if image.Comment != "Testing" {
t.Fatalf("Wrong comment: should be '%s', not '%s'", "Testing", image.Comment)
}
if images, err := graph.All(); err != nil {
t.Fatal(err)
} else if l := len(images); l != 1 {
t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l)
}
}
func TestRegister(t *testing.T) {
graph := tempGraph(t)
defer os.RemoveAll(graph.Root)
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
image := &Image{
Id: GenerateId(),
Comment: "testing",
Created: time.Now(),
}
err = graph.Register(archive, image)
if err != nil {
t.Fatal(err)
}
if images, err := graph.All(); err != nil {
t.Fatal(err)
} else if l := len(images); l != 1 {
t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l)
}
if resultImg, err := graph.Get(image.Id); err != nil {
t.Fatal(err)
} else {
if resultImg.Id != image.Id {
t.Fatalf("Wrong image ID. Should be '%s', not '%s'", image.Id, resultImg.Id)
}
if resultImg.Comment != image.Comment {
t.Fatalf("Wrong image comment. Should be '%s', not '%s'", image.Comment, resultImg.Comment)
}
}
}
func TestMount(t *testing.T) {
graph := tempGraph(t)
defer os.RemoveAll(graph.Root)
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
image, err := graph.Create(archive, nil, "Testing")
if err != nil {
t.Fatal(err)
}
tmp, err := ioutil.TempDir("", "docker-test-graph-mount-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
rootfs := path.Join(tmp, "rootfs")
if err := os.MkdirAll(rootfs, 0700); err != nil {
t.Fatal(err)
}
rw := path.Join(tmp, "rw")
if err := os.MkdirAll(rw, 0700); err != nil {
t.Fatal(err)
}
if err := image.Mount(rootfs, rw); err != nil {
t.Fatal(err)
}
// FIXME: test for mount contents
defer func() {
if err := Unmount(rootfs); err != nil {
t.Error(err)
}
}()
}
func TestDelete(t *testing.T) {
graph := tempGraph(t)
defer os.RemoveAll(graph.Root)
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
assertNImages(graph, t, 0)
img, err := graph.Create(archive, nil, "Bla bla")
if err != nil {
t.Fatal(err)
}
assertNImages(graph, t, 1)
if err := graph.Delete(img.Id); err != nil {
t.Fatal(err)
}
assertNImages(graph, t, 0)
// Test 2 create (same name) / 1 delete
img1, err := graph.Create(archive, nil, "Testing")
if err != nil {
t.Fatal(err)
}
if _, err = graph.Create(archive, nil, "Testing"); err != nil {
t.Fatal(err)
}
assertNImages(graph, t, 2)
if err := graph.Delete(img1.Id); err != nil {
t.Fatal(err)
}
assertNImages(graph, t, 1)
// Test delete wrong name
if err := graph.Delete("Not_foo"); err == nil {
t.Fatalf("Deleting wrong ID should return an error")
}
assertNImages(graph, t, 1)
}
func assertNImages(graph *Graph, t *testing.T, n int) {
if images, err := graph.All(); err != nil {
t.Fatal(err)
} else if actualN := len(images); actualN != n {
t.Fatalf("Expected %d images, found %d", n, actualN)
}
}
/*
* HELPER FUNCTIONS
*/
func tempGraph(t *testing.T) *Graph {
tmp, err := ioutil.TempDir("", "docker-graph-")
if err != nil {
t.Fatal(err)
}
graph, err := NewGraph(tmp)
if err != nil {
t.Fatal(err)
}
return graph
}
func testArchive(t *testing.T) Archive {
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
return archive
}
func fakeTar() (io.Reader, error) {
content := []byte("Hello world!\n")
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} {
hdr := new(tar.Header)
hdr.Size = int64(len(content))
hdr.Name = name
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
tw.Write(content)
}
tw.Close()
return buf, nil
}

263
image.go Normal file

@@ -0,0 +1,263 @@
package docker
import (
"bytes"
"crypto/sha256"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"path"
"strings"
"time"
)
type Image struct {
Id string `json:"id"`
Parent string `json:"parent,omitempty"`
Comment string `json:"comment,omitempty"`
Created time.Time `json:"created"`
Container string `json:"container,omitempty"`
ContainerConfig Config `json:"container_config,omitempty"`
graph *Graph
}
func LoadImage(root string) (*Image, error) {
// Load the json data
jsonData, err := ioutil.ReadFile(jsonPath(root))
if err != nil {
return nil, err
}
var img Image
if err := json.Unmarshal(jsonData, &img); err != nil {
return nil, err
}
if err := ValidateId(img.Id); err != nil {
return nil, err
}
// Check that the filesystem layer exists
if stat, err := os.Stat(layerPath(root)); err != nil {
if os.IsNotExist(err) {
return nil, fmt.Errorf("Couldn't load image %s: no filesystem layer", img.Id)
} else {
return nil, err
}
} else if !stat.IsDir() {
return nil, fmt.Errorf("Couldn't load image %s: %s is not a directory", img.Id, layerPath(root))
}
return &img, nil
}
func StoreImage(img *Image, layerData Archive, root string) error {
// Check that root doesn't already exist
if _, err := os.Stat(root); err == nil {
return fmt.Errorf("Image %s already exists", img.Id)
} else if !os.IsNotExist(err) {
return err
}
// Store the layer
layer := layerPath(root)
if err := os.MkdirAll(layer, 0700); err != nil {
return err
}
if err := Untar(layerData, layer); err != nil {
return err
}
// Store the json ball
jsonData, err := json.Marshal(img)
if err != nil {
return err
}
if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil {
return err
}
return nil
}
func layerPath(root string) string {
return path.Join(root, "layer")
}
func jsonPath(root string) string {
return path.Join(root, "json")
}
func MountAUFS(ro []string, rw string, target string) error {
// FIXME: Now mount the layers
rwBranch := fmt.Sprintf("%v=rw", rw)
roBranches := ""
for _, layer := range ro {
roBranches += fmt.Sprintf("%v=ro:", layer)
}
branches := fmt.Sprintf("br:%v:%v", rwBranch, roBranches)
return mount("none", target, "aufs", 0, branches)
}
func (image *Image) Mount(root, rw string) error {
if mounted, err := Mounted(root); err != nil {
return err
} else if mounted {
return fmt.Errorf("%s is already mounted", root)
}
layers, err := image.layers()
if err != nil {
return err
}
// Create the target directories if they don't exist
if err := os.Mkdir(root, 0755); err != nil && !os.IsExist(err) {
return err
}
if err := os.Mkdir(rw, 0755); err != nil && !os.IsExist(err) {
return err
}
// FIXME: @creack shouldn't we do this after going over changes?
if err := MountAUFS(layers, rw, root); err != nil {
return err
}
// FIXME: Create tests for deletion
// FIXME: move this part to change.go
// Retrieve the changeset from the parent and apply it to the container
// - Retrieve the changes
changes, err := Changes(layers, layers[0])
if err != nil {
return err
}
// Iterate on changes
for _, c := range changes {
// If there is a delete
if c.Kind == ChangeDelete {
// Make sure the directory exists
file_path, file_name := path.Dir(c.Path), path.Base(c.Path)
if err := os.MkdirAll(path.Join(rw, file_path), 0755); err != nil {
return err
}
// And create the whiteout (we just need to create an empty file; discard the result)
if _, err := os.Create(path.Join(rw, file_path, ".wh."+file_name)); err != nil {
return err
}
}
}
return nil
}
func (image *Image) Changes(rw string) ([]Change, error) {
layers, err := image.layers()
if err != nil {
return nil, err
}
return Changes(layers, rw)
}
func ValidateId(id string) error {
if id == "" {
return fmt.Errorf("Image id can't be empty")
}
if strings.Contains(id, ":") {
return fmt.Errorf("Invalid character in image id: ':'")
}
return nil
}
func GenerateId() string {
// FIXME: don't seed every time
rand.Seed(time.Now().UTC().UnixNano())
randomBytes := bytes.NewBuffer([]byte(fmt.Sprintf("%x", rand.Int())))
id, _ := ComputeId(randomBytes) // can't fail
return id
}
// ComputeId reads from `content` until EOF, then returns a hex-encoded prefix of the SHA256 of what it read, as a string.
func ComputeId(content io.Reader) (string, error) {
h := sha256.New()
if _, err := io.Copy(h, content); err != nil {
return "", err
}
return fmt.Sprintf("%x", h.Sum(nil)[:8]), nil
}
// Image includes convenience proxy functions to its graph
// These functions will return an error if the image is not registered
// (ie. if image.graph == nil)
func (img *Image) History() ([]*Image, error) {
var parents []*Image
if err := img.WalkHistory(
func(img *Image) error {
parents = append(parents, img)
return nil
},
); err != nil {
return nil, err
}
return parents, nil
}
// layers returns all the filesystem layers needed to mount an image
// FIXME: @shykes refactor this function with the new error handling
// (I'll do it if I have time tonight, I focus on the rest)
func (img *Image) layers() ([]string, error) {
var list []string
var e error
if err := img.WalkHistory(
func(img *Image) (err error) {
if layer, err := img.layer(); err != nil {
e = err
} else if layer != "" {
list = append(list, layer)
}
return err
},
); err != nil {
return nil, err
} else if e != nil { // Did an error occur inside the handler?
return nil, e
}
if len(list) == 0 {
return nil, fmt.Errorf("No layer found for image %s\n", img.Id)
}
return list, nil
}
func (img *Image) WalkHistory(handler func(*Image) error) (err error) {
currentImg := img
for currentImg != nil {
if handler != nil {
if err := handler(currentImg); err != nil {
return err
}
}
currentImg, err = currentImg.GetParent()
if err != nil {
return fmt.Errorf("Error while getting parent image: %v", err)
}
}
return nil
}
func (img *Image) GetParent() (*Image, error) {
if img.Parent == "" {
return nil, nil
}
if img.graph == nil {
return nil, fmt.Errorf("Can't lookup parent of unregistered image")
}
return img.graph.Get(img.Parent)
}
func (img *Image) root() (string, error) {
if img.graph == nil {
return "", fmt.Errorf("Can't lookup root of unregistered image")
}
return img.graph.imageRoot(img.Id), nil
}
// Return the path of an image's layer
func (img *Image) layer() (string, error) {
root, err := img.root()
if err != nil {
return "", err
}
return layerPath(root), nil
}


@@ -22,7 +22,7 @@ lxc.network.mtu = 1500
lxc.network.ipv4 = {{.NetworkSettings.IpAddress}}/{{.NetworkSettings.IpPrefixLen}}
# root filesystem
{{$ROOTFS := .Mountpoint.Root}}
{{$ROOTFS := .RootfsPath}}
lxc.rootfs = {{$ROOTFS}}
# use a dedicated pts for the container (and limit the number of pseudo terminal

48
mount.go Normal file

@@ -0,0 +1,48 @@
package docker
import (
"fmt"
"os"
"path/filepath"
"syscall"
"time"
)
func Unmount(target string) error {
if err := syscall.Unmount(target, 0); err != nil {
return err
}
// Even though we just unmounted the filesystem, AUFS will prevent deleting the mntpoint
// for some time. We'll just keep retrying until it succeeds.
for retries := 0; retries < 1000; retries++ {
err := os.Remove(target)
if err == nil {
// rm mntpoint succeeded
return nil
}
if os.IsNotExist(err) {
// mntpoint doesn't exist anymore. Success.
return nil
}
// fmt.Printf("(%v) Remove %v returned: %v\n", retries, target, err)
time.Sleep(10 * time.Millisecond)
}
return fmt.Errorf("Umount: Failed to umount %v", target)
}
func Mounted(mountpoint string) (bool, error) {
mntpoint, err := os.Stat(mountpoint)
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
parent, err := os.Stat(filepath.Join(mountpoint, ".."))
if err != nil {
return false, err
}
mntpointSt := mntpoint.Sys().(*syscall.Stat_t)
parentSt := parent.Sys().(*syscall.Stat_t)
return mntpointSt.Dev != parentSt.Dev, nil
}


@@ -1,115 +0,0 @@
package docker
import (
"fmt"
"github.com/dotcloud/docker/fake"
"github.com/dotcloud/docker/fs"
"io/ioutil"
"os"
"testing"
)
// Look for inconsistencies in a store.
func healthCheck(store *fs.Store) error {
parents := make(map[string]bool)
paths, err := store.Paths()
if err != nil {
return err
}
for _, path := range paths {
images, err := store.List(path)
if err != nil {
return err
}
IDs := make(map[string]bool) // All IDs for this path
for _, img := range images {
// Check for duplicate IDs per path
if _, exists := IDs[img.Id]; exists {
return fmt.Errorf("Duplicate ID: %s", img.Id)
} else {
IDs[img.Id] = true
}
// Store parent for 2nd pass
if parent := img.Parent; parent != "" {
parents[parent] = true
}
}
}
// Check non-existing parents
for parent := range parents {
if _, exists := parents[parent]; !exists {
return fmt.Errorf("Reference to non-registered parent: %s", parent)
}
}
return nil
}
// Note: This test is in the docker package because it needs to be run as root
func TestMount(t *testing.T) {
dir, err := ioutil.TempDir("", "docker-fs-test-mount")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
store, err := fs.New(dir)
if err != nil {
t.Fatal(err)
}
archive, err := fake.FakeTar()
if err != nil {
t.Fatal(err)
}
image, err := store.Create(archive, nil, "foo", "Testing")
if err != nil {
t.Fatal(err)
}
// Create mount targets
root, err := ioutil.TempDir("", "docker-fs-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(root)
rw, err := ioutil.TempDir("", "docker-fs-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rw)
mountpoint, err := image.Mount(root, rw)
if err != nil {
t.Fatal(err)
}
defer mountpoint.Umount()
// Mountpoint should be marked as mounted
if !mountpoint.Mounted() {
t.Fatal("Mountpoint not mounted")
}
// There should be one mountpoint registered
if mps, err := image.Mountpoints(); err != nil {
t.Fatal(err)
} else if len(mps) != 1 {
t.Fatal("Wrong number of mountpoints registered (should be %d, not %d)", 1, len(mps))
}
// Unmounting should work
if err := mountpoint.Umount(); err != nil {
t.Fatal(err)
}
// De-registering should work
if err := mountpoint.Deregister(); err != nil {
t.Fatal(err)
}
if mps, err := image.Mountpoints(); err != nil {
t.Fatal(err)
} else if len(mps) != 0 {
t.Fatal("Wrong number of mountpoints registered (should be %d, not %d)", 0, len(mps))
}
// General health check
if err := healthCheck(store); err != nil {
t.Fatal(err)
}
}


@@ -95,7 +95,8 @@ func getIfaceAddr(name string) (net.Addr, error) {
case len(addrs4) == 0:
return nil, fmt.Errorf("Interface %v has no IP addresses", name)
case len(addrs4) > 1:
return nil, fmt.Errorf("Interface %v has more than 1 IPv4 address", name)
fmt.Printf("Interface %v has more than 1 IPv4 address. Defaulting to using %v\n",
name, (addrs4[0].(*net.IPNet)).IP)
}
return addrs4[0], nil
}
@@ -110,6 +111,7 @@ type PortMapper struct {
func (mapper *PortMapper) cleanup() error {
// Ignore errors - This could mean the chains were never set up
iptables("-t", "nat", "-D", "PREROUTING", "-j", "DOCKER")
iptables("-t", "nat", "-D", "OUTPUT", "-j", "DOCKER")
iptables("-t", "nat", "-F", "DOCKER")
iptables("-t", "nat", "-X", "DOCKER")
mapper.mapping = make(map[int]net.TCPAddr)
@@ -123,6 +125,9 @@ func (mapper *PortMapper) setup() error {
if err := iptables("-t", "nat", "-A", "PREROUTING", "-j", "DOCKER"); err != nil {
return errors.New("Unable to setup port networking: Failed to inject docker in PREROUTING chain")
}
if err := iptables("-t", "nat", "-A", "OUTPUT", "-j", "DOCKER"); err != nil {
return errors.New("Unable to setup port networking: Failed to inject docker in OUTPUT chain")
}
return nil
}
@@ -279,7 +284,7 @@ func (iface *NetworkInterface) AllocatePort(port int) (int, error) {
if err != nil {
return -1, err
}
if err := iface.manager.portMapper.Map(extPort, net.TCPAddr{iface.IPNet.IP, port}); err != nil {
if err := iface.manager.portMapper.Map(extPort, net.TCPAddr{IP: iface.IPNet.IP, Port: port}); err != nil {
iface.manager.portAllocator.Release(extPort)
return -1, err
}
@@ -319,7 +324,7 @@ func (manager *NetworkManager) Allocate() (*NetworkInterface, error) {
return nil, err
}
iface := &NetworkInterface{
IPNet: net.IPNet{ip, manager.bridgeNetwork.Mask},
IPNet: net.IPNet{IP: ip, Mask: manager.bridgeNetwork.Mask},
Gateway: manager.bridgeNetwork.IP,
manager: manager,
}


@@ -102,7 +102,7 @@ func TestConversion(t *testing.T) {
func TestIPAllocator(t *testing.T) {
gwIP, n, _ := net.ParseCIDR("127.0.0.1/29")
alloc, err := newIPAllocator(&net.IPNet{gwIP, n.Mask})
alloc, err := newIPAllocator(&net.IPNet{IP: gwIP, Mask: n.Mask})
if err != nil {
t.Fatal(err)
}


@@ -80,7 +80,7 @@ class docker {
owner => "root",
group => "root",
content => template("docker/dockerd.conf"),
require => Exec["fetch-docker"],
require => Exec["copy-docker-bin"],
}
file { "/home/vagrant":


@@ -8,5 +8,5 @@ respawn
script
test -f /etc/default/locale && . /etc/default/locale || true
LANG=$LANG LC_ALL=$LANG /usr/local/bin/docker -d
LANG=$LANG LC_ALL=$LANG /usr/local/bin/docker -d >> /var/log/dockerd 2>&1
end script


@@ -10,6 +10,11 @@ import (
"net"
)
// Note: the globals are here to avoid import cycle
// FIXME: Handle debug levels mode?
var DEBUG_FLAG bool = false
var CLIENT_SOCKET io.Writer = nil
// Connect to a remote endpoint using protocol `proto` and address `addr`,
// issue a single call, and return the result.
// `proto` may be "tcp", "unix", etc. See the `net` package for available protocols.
@@ -42,6 +47,9 @@ func ListenAndServe(proto, addr string, service Service) error {
return err
} else {
go func() {
if DEBUG_FLAG {
CLIENT_SOCKET = conn
}
if err := Serve(conn, service); err != nil {
log.Printf("Error: " + err.Error() + "\n")
fmt.Fprintf(conn, "Error: "+err.Error()+"\n")

385
registry.go Normal file

@@ -0,0 +1,385 @@
package docker
import (
"encoding/json"
"fmt"
"github.com/dotcloud/docker/auth"
"io"
"io/ioutil"
"net/http"
"path"
"strings"
)
//FIXME: Set the endpoint in a conf file or via commandline
//const REGISTRY_ENDPOINT = "http://registry-creack.dotcloud.com/v1"
const REGISTRY_ENDPOINT = auth.REGISTRY_SERVER + "/v1"
// Build an Image object from raw json data
func NewImgJson(src []byte) (*Image, error) {
ret := &Image{}
Debugf("Json string: {%s}\n", src)
// FIXME: Is there a cleaner way to "purify" the input json?
if err := json.Unmarshal(src, ret); err != nil {
return nil, err
}
return ret, nil
}
// Build a list of Image objects from raw json data
// FIXME: Do this in "stream" mode
func NewMultipleImgJson(src []byte) ([]*Image, error) {
ret := []*Image{}
dec := json.NewDecoder(strings.NewReader(strings.Replace(string(src), "null", "\"\"", -1)))
for {
m := &Image{}
if err := dec.Decode(m); err == io.EOF {
break
} else if err != nil {
return nil, err
}
ret = append(ret, m)
}
return ret, nil
}
// Retrieve the history of a given image from the Registry.
// Return a list of the parents' json (requested image included)
func (graph *Graph) getRemoteHistory(imgId string, authConfig *auth.AuthConfig) ([]*Image, error) {
client := &http.Client{}
req, err := http.NewRequest("GET", REGISTRY_ENDPOINT+"/images/"+imgId+"/history", nil)
if err != nil {
return nil, err
}
req.SetBasicAuth(authConfig.Username, authConfig.Password)
res, err := client.Do(req)
if err != nil || res.StatusCode != 200 {
if res != nil {
return nil, fmt.Errorf("Internal server error: %d trying to fetch remote history for %s", res.StatusCode, imgId)
}
return nil, err
}
defer res.Body.Close()
jsonString, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, fmt.Errorf("Error while reading the http response: %s\n", err)
}
history, err := NewMultipleImgJson(jsonString)
if err != nil {
return nil, fmt.Errorf("Error while parsing the json: %s\n", err)
}
return history, nil
}
// Check if an image exists in the Registry
func (graph *Graph) LookupRemoteImage(imgId string, authConfig *auth.AuthConfig) bool {
rt := &http.Transport{Proxy: http.ProxyFromEnvironment}
req, err := http.NewRequest("GET", REGISTRY_ENDPOINT+"/images/"+imgId+"/json", nil)
if err != nil {
return false
}
req.SetBasicAuth(authConfig.Username, authConfig.Password)
res, err := rt.RoundTrip(req)
if err != nil {
return false
}
return res.StatusCode == 307
}
// Retrieve an image from the Registry.
// Returns the Image object as well as the layer as an Archive (io.Reader)
func (graph *Graph) getRemoteImage(imgId string, authConfig *auth.AuthConfig) (*Image, Archive, error) {
client := &http.Client{}
// Get the Json
req, err := http.NewRequest("GET", REGISTRY_ENDPOINT+"/images/"+imgId+"/json", nil)
if err != nil {
return nil, nil, fmt.Errorf("Error while getting from the server: %s\n", err)
}
req.SetBasicAuth(authConfig.Username, authConfig.Password)
res, err := client.Do(req)
if err != nil || res.StatusCode != 200 {
if res != nil {
return nil, nil, fmt.Errorf("Internal server error: %d trying to get image %s", res.StatusCode, imgId)
}
return nil, nil, err
}
defer res.Body.Close()
jsonString, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, nil, fmt.Errorf("Error while reading the http response: %s\n", err)
}
img, err := NewImgJson(jsonString)
if err != nil {
return nil, nil, fmt.Errorf("Error while parsing the json: %s\n", err)
}
img.Id = imgId
// Get the layer
req, err = http.NewRequest("GET", REGISTRY_ENDPOINT+"/images/"+imgId+"/layer", nil)
if err != nil {
return nil, nil, fmt.Errorf("Error while getting from the server: %s\n", err)
}
req.SetBasicAuth(authConfig.Username, authConfig.Password)
res, err = client.Do(req)
if err != nil {
return nil, nil, err
}
return img, res.Body, nil
}
func (graph *Graph) PullImage(imgId string, authConfig *auth.AuthConfig) error {
history, err := graph.getRemoteHistory(imgId, authConfig)
if err != nil {
return err
}
// FIXME: Try to stream the images?
// FIXME: Launch the getRemoteImage() in goroutines
for _, j := range history {
if !graph.Exists(j.Id) {
img, layer, err := graph.getRemoteImage(j.Id, authConfig)
if err != nil {
// FIXME: Keep going in case of error?
return err
}
if err = graph.Register(layer, img); err != nil {
return err
}
}
}
return nil
}
// FIXME: Handle the askedTag parameter
func (graph *Graph) PullRepository(stdout io.Writer, remote, askedTag string, repositories *TagStore, authConfig *auth.AuthConfig) error {
client := &http.Client{}
fmt.Fprintf(stdout, "Pulling repo: %s\n", REGISTRY_ENDPOINT+"/users/"+remote)
req, err := http.NewRequest("GET", REGISTRY_ENDPOINT+"/users/"+remote, nil)
if err != nil {
return err
}
req.SetBasicAuth(authConfig.Username, authConfig.Password)
res, err := client.Do(req)
if err != nil || res.StatusCode != 200 {
if res != nil {
return fmt.Errorf("Internal server error: %d trying to pull %s", res.StatusCode, remote)
}
return err
}
defer res.Body.Close()
rawJson, err := ioutil.ReadAll(res.Body)
if err != nil {
return err
}
t := map[string]string{}
if err = json.Unmarshal(rawJson, &t); err != nil {
return err
}
for tag, rev := range t {
if err = graph.PullImage(rev, authConfig); err != nil {
return err
}
if err = repositories.Set(remote, tag, rev, true); err != nil {
return err
}
}
if err = repositories.Save(); err != nil {
return err
}
return nil
}
// Push a local image to the registry with its history if needed
func (graph *Graph) PushImage(stdout io.Writer, imgOrig *Image, authConfig *auth.AuthConfig) error {
client := &http.Client{}
// FIXME: Factorize the code
// FIXME: Do the puts in goroutines
if err := imgOrig.WalkHistory(func(img *Image) error {
jsonRaw, err := ioutil.ReadFile(path.Join(graph.Root, img.Id, "json"))
if err != nil {
return fmt.Errorf("Error while retreiving the path for {%s}: %s", img.Id, err)
}
fmt.Fprintf(stdout, "Pushing image [%s] on {%s}\n", img.Id, REGISTRY_ENDPOINT+"/images/"+img.Id+"/json")
// FIXME: try json with UTF8
jsonData := strings.NewReader(string(jsonRaw))
req, err := http.NewRequest("PUT", REGISTRY_ENDPOINT+"/images/"+img.Id+"/json", jsonData)
if err != nil {
return err
}
req.Header.Add("Content-type", "application/json")
req.SetBasicAuth(authConfig.Username, authConfig.Password)
res, err := client.Do(req)
if err != nil || res.StatusCode != 200 {
if res == nil {
return fmt.Errorf(
"Error: Internal server error trying to push image {%s} (json): %s",
img.Id, err)
}
Debugf("Pushing return status: %d\n", res.StatusCode)
switch res.StatusCode {
case 204:
// Case where the image is already on the Registry
// FIXME: Do not be silent?
fmt.Fprintf(stdout, "The image %s is already up to date on the registry.\n", img.Id)
return nil
case 400:
return fmt.Errorf("Error: Invalid Json")
default:
return fmt.Errorf(
"Error: Internal server error: %d trying to push image {%s} (json): %s\n",
res.StatusCode, img.Id, err)
}
}
req2, err := http.NewRequest("PUT", REGISTRY_ENDPOINT+"/images/"+img.Id+"/layer", nil)
req2.SetBasicAuth(authConfig.Username, authConfig.Password)
res2, err := client.Do(req2)
if err != nil || res2.StatusCode != 307 {
return fmt.Errorf(
"Internal server error trying to push image {%s} (layer 1): %s\n",
img.Id, err)
}
url, err := res2.Location()
if err != nil || url == nil {
return fmt.Errorf(
"Fail to retrieve layer storage URL for image {%s}: %s\n",
img.Id, err)
}
// FIXME: Don't do this :D. Check the S3 requirement and implement chunks of 5MB
// FIXME2: I won't stress it enough, DON'T DO THIS! very high priority
// The layer is tarred twice: layerData2 is read in full below to compute the
// Content-Length, while layerData is streamed as the request body.
layerData2, err := Tar(path.Join(graph.Root, img.Id, "layer"), Gzip)
if err != nil {
return fmt.Errorf(
"Error while retrieving layer for {%s}: %s\n",
img.Id, err)
}
layerData, err := Tar(path.Join(graph.Root, img.Id, "layer"), Gzip)
if err != nil {
return fmt.Errorf(
"Error while retrieving layer for {%s}: %s\n",
img.Id, err)
}
req3, err := http.NewRequest("PUT", url.String(), layerData)
if err != nil {
return err
}
tmp, err := ioutil.ReadAll(layerData2)
if err != nil {
return err
}
req3.ContentLength = int64(len(tmp))
req3.TransferEncoding = []string{"none"}
res3, err := client.Do(req3)
if err != nil || res3.StatusCode != 200 {
if res3 == nil {
return fmt.Errorf(
"Error trying to push image {%s} (layer 2): %s\n",
img.Id, err)
}
return fmt.Errorf(
"Error trying to push image {%s} (layer 2): %s (%d)\n",
img.Id, err, res3.StatusCode)
}
return nil
}); err != nil {
return err
}
return nil
}
// Push a tag to the registry.
// Remote has the format '<user>/<repo>'
func (graph *Graph) pushTag(remote, revision, tag string, authConfig *auth.AuthConfig) error {
// Keep this for backward compatibility
if tag == "" {
tag = "lastest"
}
// "jsonify" the string
revision = "\"" + revision + "\""
Debugf("Pushing tags for rev [%s] on {%s}\n", revision, REGISTRY_ENDPOINT+"/users/"+remote+"/"+tag)
client := &http.Client{}
req, err := http.NewRequest("PUT", REGISTRY_ENDPOINT+"/users/"+remote+"/"+tag, strings.NewReader(revision))
req.Header.Add("Content-type", "application/json")
req.SetBasicAuth(authConfig.Username, authConfig.Password)
res, err := client.Do(req)
if err != nil || (res.StatusCode != 200 && res.StatusCode != 201) {
if res != nil {
return fmt.Errorf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote)
}
return err
}
Debugf("Result of push tag: %d\n", res.StatusCode)
switch res.StatusCode {
default:
return fmt.Errorf("Error %d\n", res.StatusCode)
case 200:
case 201:
}
return nil
}
func (graph *Graph) LookupRemoteRepository(remote string, authConfig *auth.AuthConfig) bool {
rt := &http.Transport{Proxy: http.ProxyFromEnvironment}
req, err := http.NewRequest("GET", REGISTRY_ENDPOINT+"/users/"+remote, nil)
if err != nil {
return false
}
req.SetBasicAuth(authConfig.Username, authConfig.Password)
res, err := rt.RoundTrip(req)
if err != nil || res.StatusCode != 200 {
return false
}
return true
}
func (graph *Graph) pushPrimitive(stdout io.Writer, remote, tag, imgId string, authConfig *auth.AuthConfig) error {
// Check if the local image exists
img, err := graph.Get(imgId)
if err != nil {
return err
}
// Push the image
if err = graph.PushImage(stdout, img, authConfig); err != nil {
return err
}
// And then the tag
if err = graph.pushTag(remote, imgId, tag, authConfig); err != nil {
return err
}
return nil
}
// Push a repository to the registry.
// Remote has the format '<user>/<repo>'
func (graph *Graph) PushRepository(stdout io.Writer, remote string, localRepo Repository, authConfig *auth.AuthConfig) error {
// Check if the remote repository exists
// FIXME: @lopter How to handle this?
// if !graph.LookupRemoteRepository(remote, authConfig) {
// return fmt.Errorf("The remote repository %s does not exist\n", remote)
// }
// For each image within the repo, push them
for tag, imgId := range localRepo {
if err := graph.pushPrimitive(stdout, remote, tag, imgId, authConfig); err != nil {
// FIXME: Continue on error?
return err
}
}
return nil
}

292
runtime.go Normal file

@@ -0,0 +1,292 @@
package docker
import (
"container/list"
"fmt"
"github.com/dotcloud/docker/auth"
"io"
"io/ioutil"
"os"
"path"
"sort"
"sync"
"time"
)
type Runtime struct {
root string
repository string
containers *list.List
networkManager *NetworkManager
graph *Graph
repositories *TagStore
authConfig *auth.AuthConfig
}
var sysInitPath string
func init() {
sysInitPath = SelfPath()
}
func (runtime *Runtime) List() []*Container {
containers := new(History)
for e := runtime.containers.Front(); e != nil; e = e.Next() {
containers.Add(e.Value.(*Container))
}
return *containers
}
func (runtime *Runtime) getContainerElement(id string) *list.Element {
for e := runtime.containers.Front(); e != nil; e = e.Next() {
container := e.Value.(*Container)
if container.Id == id {
return e
}
}
return nil
}
func (runtime *Runtime) Get(id string) *Container {
e := runtime.getContainerElement(id)
if e == nil {
return nil
}
return e.Value.(*Container)
}
func (runtime *Runtime) Exists(id string) bool {
return runtime.Get(id) != nil
}
func (runtime *Runtime) containerRoot(id string) string {
return path.Join(runtime.repository, id)
}
func (runtime *Runtime) Create(config *Config) (*Container, error) {
// Lookup image
img, err := runtime.repositories.LookupImage(config.Image)
if err != nil {
return nil, err
}
container := &Container{
// FIXME: we should generate the ID here instead of receiving it as an argument
Id: GenerateId(),
Created: time.Now(),
Path: config.Cmd[0],
Args: config.Cmd[1:], //FIXME: de-duplicate from config
Config: config,
Image: img.Id, // Always use the resolved image id
NetworkSettings: &NetworkSettings{},
// FIXME: do we need to store this in the container?
SysInitPath: sysInitPath,
}
container.root = runtime.containerRoot(container.Id)
// Step 1: create the container directory.
// This doubles as a barrier to avoid race conditions.
if err := os.Mkdir(container.root, 0700); err != nil {
return nil, err
}
// Step 2: save the container json
if err := container.ToDisk(); err != nil {
return nil, err
}
// Step 3: register the container
if err := runtime.Register(container); err != nil {
return nil, err
}
return container, nil
}
func (runtime *Runtime) Load(id string) (*Container, error) {
container := &Container{root: runtime.containerRoot(id)}
if err := container.FromDisk(); err != nil {
return nil, err
}
if container.Id != id {
return container, fmt.Errorf("Container %s is stored at %s", container.Id, id)
}
if err := runtime.Register(container); err != nil {
return nil, err
}
return container, nil
}
// Register makes a container object usable by the runtime as <container.Id>
func (runtime *Runtime) Register(container *Container) error {
if container.runtime != nil || runtime.Exists(container.Id) {
return fmt.Errorf("Container is already loaded")
}
if err := validateId(container.Id); err != nil {
return err
}
container.runtime = runtime
// Setup state lock (formerly in newState())
lock := new(sync.Mutex)
container.State.stateChangeLock = lock
container.State.stateChangeCond = sync.NewCond(lock)
// Attach to stdout and stderr
container.stderr = newWriteBroadcaster()
container.stdout = newWriteBroadcaster()
// Attach to stdin
if container.Config.OpenStdin {
container.stdin, container.stdinPipe = io.Pipe()
} else {
container.stdinPipe = NopWriteCloser(ioutil.Discard) // Silently drop stdin
}
// Setup logging of stdout and stderr to disk
if err := runtime.LogToDisk(container.stdout, container.logPath("stdout")); err != nil {
return err
}
if err := runtime.LogToDisk(container.stderr, container.logPath("stderr")); err != nil {
return err
}
// done
runtime.containers.PushBack(container)
return nil
}
func (runtime *Runtime) LogToDisk(src *writeBroadcaster, dst string) error {
log, err := os.OpenFile(dst, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
if err != nil {
return err
}
src.AddWriter(NopWriteCloser(log))
return nil
}
func (runtime *Runtime) Destroy(container *Container) error {
element := runtime.getContainerElement(container.Id)
if element == nil {
return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.Id)
}
if err := container.Stop(); err != nil {
return err
}
if mounted, err := container.Mounted(); err != nil {
return err
} else if mounted {
if err := container.Unmount(); err != nil {
return fmt.Errorf("Unable to unmount container %v: %v", container.Id, err)
}
}
// Deregister the container before removing its directory, to avoid race conditions
runtime.containers.Remove(element)
if err := os.RemoveAll(container.root); err != nil {
return fmt.Errorf("Unable to remove filesystem for %v: %v", container.Id, err)
}
return nil
}
// Commit creates a new filesystem image from the current state of a container.
// The image can optionally be tagged into a repository
func (runtime *Runtime) Commit(id, repository, tag string) (*Image, error) {
container := runtime.Get(id)
if container == nil {
return nil, fmt.Errorf("No such container: %s", id)
}
// FIXME: freeze the container before copying it to avoid data corruption?
// FIXME: this shouldn't be in commands.
rwTar, err := container.ExportRw()
if err != nil {
return nil, err
}
// Create a new image from the container's base layers + a new layer from container changes
img, err := runtime.graph.Create(rwTar, container, "")
if err != nil {
return nil, err
}
// Register the image if needed
if repository != "" {
if err := runtime.repositories.Set(repository, tag, img.Id, true); err != nil {
return img, err
}
}
return img, nil
}
func (runtime *Runtime) restore() error {
dir, err := ioutil.ReadDir(runtime.repository)
if err != nil {
return err
}
for _, v := range dir {
id := v.Name()
container, err := runtime.Load(id)
if err != nil {
Debugf("Failed to load container %v: %v", id, err)
continue
}
Debugf("Loaded container %v", container.Id)
}
return nil
}
func NewRuntime() (*Runtime, error) {
return NewRuntimeFromDirectory("/var/lib/docker")
}
func NewRuntimeFromDirectory(root string) (*Runtime, error) {
runtime_repo := path.Join(root, "containers")
if err := os.MkdirAll(runtime_repo, 0700); err != nil && !os.IsExist(err) {
return nil, err
}
g, err := NewGraph(path.Join(root, "graph"))
if err != nil {
return nil, err
}
repositories, err := NewTagStore(path.Join(root, "repositories"), g)
if err != nil {
return nil, fmt.Errorf("Couldn't create Tag store: %s", err)
}
netManager, err := newNetworkManager(networkBridgeIface)
if err != nil {
return nil, err
}
authConfig, err := auth.LoadConfig(root)
if err != nil && authConfig == nil {
// If the auth file does not exist, keep going
return nil, err
}
runtime := &Runtime{
root: root,
repository: runtime_repo,
containers: list.New(),
networkManager: netManager,
graph: g,
repositories: repositories,
authConfig: authConfig,
}
if err := runtime.restore(); err != nil {
return nil, err
}
return runtime, nil
}
type History []*Container
func (history *History) Len() int {
return len(*history)
}
func (history *History) Less(i, j int) bool {
containers := *history
return containers[j].When().Before(containers[i].When())
}
func (history *History) Swap(i, j int) {
containers := *history
tmp := containers[i]
containers[i] = containers[j]
containers[j] = tmp
}
func (history *History) Add(container *Container) {
*history = append(*history, container)
sort.Sort(history)
}


@@ -1,7 +1,6 @@
package docker
import (
"github.com/dotcloud/docker/fs"
"io"
"io/ioutil"
"os"
@@ -11,13 +10,13 @@ import (
)
const testLayerPath string = "/var/lib/docker/docker-ut.tar"
const unitTestImageName string = "busybox"
const unitTestImageName string = "http://get.docker.io/images/busybox"
var unitTestStoreBase string
var srv *Server
func nuke(docker *Docker) error {
return os.RemoveAll(docker.root)
func nuke(runtime *Runtime) error {
return os.RemoveAll(runtime.root)
}
func CopyDirectory(source, dest string) error {
@@ -57,14 +56,13 @@ func init() {
unitTestStoreBase = root
// Make it our Store root
docker, err := NewFromDirectory(root)
runtime, err := NewRuntimeFromDirectory(root)
if err != nil {
panic(err)
}
// Create the "Server"
srv := &Server{
images: docker.Store,
containers: docker,
runtime: runtime,
}
// Retrieve the Image
if err := srv.CmdImport(os.Stdin, os.Stdout, unitTestImageName); err != nil {
@@ -72,7 +70,7 @@ func init() {
}
}
func newTestDocker() (*Docker, error) {
func newTestRuntime() (*Runtime, error) {
root, err := ioutil.TempDir("", "docker-test")
if err != nil {
return nil, err
@@ -85,16 +83,16 @@ func newTestDocker() (*Docker, error) {
return nil, err
}
docker, err := NewFromDirectory(root)
runtime, err := NewRuntimeFromDirectory(root)
if err != nil {
return nil, err
}
return docker, nil
return runtime, nil
}
func GetTestImage(docker *Docker) *fs.Image {
imgs, err := docker.Store.Images()
func GetTestImage(runtime *Runtime) *Image {
imgs, err := runtime.graph.All()
if err != nil {
panic(err)
} else if len(imgs) < 1 {
@@ -103,161 +101,151 @@ func GetTestImage(docker *Docker) *fs.Image {
return imgs[0]
}
func TestCreate(t *testing.T) {
docker, err := newTestDocker()
func TestRuntimeCreate(t *testing.T) {
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(docker)
defer nuke(runtime)
// Make sure we start with 0 containers
if len(docker.List()) != 0 {
t.Errorf("Expected 0 containers, %v found", len(docker.List()))
if len(runtime.List()) != 0 {
t.Errorf("Expected 0 containers, %v found", len(runtime.List()))
}
container, err := docker.Create(
"test_create",
"ls",
[]string{"-al"},
GetTestImage(docker),
&Config{},
container, err := runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"ls", "-al"},
},
)
if err != nil {
t.Fatal(err)
}
defer func() {
if err := docker.Destroy(container); err != nil {
if err := runtime.Destroy(container); err != nil {
t.Error(err)
}
}()
// Make sure we can find the newly created container with List()
if len(docker.List()) != 1 {
t.Errorf("Expected 1 container, %v found", len(docker.List()))
if len(runtime.List()) != 1 {
t.Errorf("Expected 1 container, %v found", len(runtime.List()))
}
// Make sure the container List() returns is the right one
if docker.List()[0].Id != "test_create" {
t.Errorf("Unexpected container %v returned by List", docker.List()[0])
if runtime.List()[0].Id != container.Id {
t.Errorf("Unexpected container %v returned by List", runtime.List()[0])
}
// Make sure we can get the container with Get()
if docker.Get("test_create") == nil {
if runtime.Get(container.Id) == nil {
t.Errorf("Unable to get newly created container")
}
// Make sure it is the right container
if docker.Get("test_create") != container {
if runtime.Get(container.Id) != container {
t.Errorf("Get() returned the wrong container")
}
// Make sure Exists returns it as existing
if !docker.Exists("test_create") {
if !runtime.Exists(container.Id) {
t.Errorf("Exists() returned false for a newly created container")
}
}
func TestDestroy(t *testing.T) {
docker, err := newTestDocker()
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(docker)
container, err := docker.Create(
"test_destroy",
"ls",
[]string{"-al"},
GetTestImage(docker),
&Config{},
defer nuke(runtime)
container, err := runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"ls", "-al"},
},
)
if err != nil {
t.Fatal(err)
}
// Destroy
if err := docker.Destroy(container); err != nil {
if err := runtime.Destroy(container); err != nil {
t.Error(err)
}
// Make sure docker.Exists() behaves correctly
if docker.Exists("test_destroy") {
// Make sure runtime.Exists() behaves correctly
if runtime.Exists("test_destroy") {
t.Errorf("Exists() returned true")
}
// Make sure docker.List() doesn't list the destroyed container
if len(docker.List()) != 0 {
t.Errorf("Expected 0 container, %v found", len(docker.List()))
// Make sure runtime.List() doesn't list the destroyed container
if len(runtime.List()) != 0 {
t.Errorf("Expected 0 container, %v found", len(runtime.List()))
}
// Make sure docker.Get() refuses to return the unexisting container
if docker.Get("test_destroy") != nil {
// Make sure runtime.Get() refuses to return the nonexistent container
if runtime.Get(container.Id) != nil {
t.Errorf("Unable to get newly created container")
}
// Make sure the container root directory does not exist anymore
_, err = os.Stat(container.Root)
_, err = os.Stat(container.root)
if err == nil || !os.IsNotExist(err) {
t.Errorf("Container root directory still exists after destroy")
}
// Test double destroy
if err := docker.Destroy(container); err == nil {
if err := runtime.Destroy(container); err == nil {
// It should have failed
t.Errorf("Double destroy did not fail")
}
}
func TestGet(t *testing.T) {
docker, err := newTestDocker()
runtime, err := newTestRuntime()
if err != nil {
t.Fatal(err)
}
defer nuke(docker)
container1, err := docker.Create(
"test1",
"ls",
[]string{"-al"},
GetTestImage(docker),
&Config{},
defer nuke(runtime)
container1, err := runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"ls", "-al"},
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(container1)
defer runtime.Destroy(container1)
container2, err := docker.Create(
"test2",
"ls",
[]string{"-al"},
GetTestImage(docker),
&Config{},
container2, err := runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"ls", "-al"},
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(container2)
defer runtime.Destroy(container2)
container3, err := docker.Create(
"test3",
"ls",
[]string{"-al"},
GetTestImage(docker),
&Config{},
container3, err := runtime.Create(&Config{
Image: GetTestImage(runtime).Id,
Cmd: []string{"ls", "-al"},
},
)
if err != nil {
t.Fatal(err)
}
defer docker.Destroy(container3)
defer runtime.Destroy(container3)
if docker.Get("test1") != container1 {
t.Errorf("Get(test1) returned %v while expecting %v", docker.Get("test1"), container1)
if runtime.Get(container1.Id) != container1 {
t.Errorf("Get(test1) returned %v while expecting %v", runtime.Get(container1.Id), container1)
}
if docker.Get("test2") != container2 {
t.Errorf("Get(test2) returned %v while expecting %v", docker.Get("test2"), container2)
if runtime.Get(container2.Id) != container2 {
t.Errorf("Get(test2) returned %v while expecting %v", runtime.Get(container2.Id), container2)
}
if docker.Get("test3") != container3 {
t.Errorf("Get(test3) returned %v while expecting %v", docker.Get("test3"), container3)
if runtime.Get(container3.Id) != container3 {
t.Errorf("Get(test3) returned %v while expecting %v", runtime.Get(container3.Id), container3)
}
}
@@ -275,25 +263,23 @@ func TestRestore(t *testing.T) {
t.Fatal(err)
}
docker1, err := NewFromDirectory(root)
runtime1, err := NewRuntimeFromDirectory(root)
if err != nil {
t.Fatal(err)
}
// Create a container with one instance of docker
container1, err := docker1.Create(
"restore_test",
"ls",
[]string{"-al"},
GetTestImage(docker1),
&Config{},
container1, err := runtime1.Create(&Config{
Image: GetTestImage(runtime1).Id,
Cmd: []string{"ls", "-al"},
},
)
if err != nil {
t.Fatal(err)
}
defer docker1.Destroy(container1)
if len(docker1.List()) != 1 {
t.Errorf("Expected 1 container, %v found", len(docker1.List()))
defer runtime1.Destroy(container1)
if len(runtime1.List()) != 1 {
t.Errorf("Expected 1 container, %v found", len(runtime1.List()))
}
if err := container1.Run(); err != nil {
t.Fatal(err)
@@ -301,15 +287,15 @@ func TestRestore(t *testing.T) {
// Here we are simulating a docker restart - that is, reloading all containers
// from scratch
docker2, err := NewFromDirectory(root)
runtime2, err := NewRuntimeFromDirectory(root)
if err != nil {
t.Fatal(err)
}
defer nuke(docker2)
if len(docker2.List()) != 1 {
t.Errorf("Expected 1 container, %v found", len(docker2.List()))
defer nuke(runtime2)
if len(runtime2.List()) != 1 {
t.Errorf("Expected 1 container, %v found", len(runtime2.List()))
}
container2 := docker2.Get("restore_test")
container2 := runtime2.Get(container1.Id)
if container2 == nil {
t.Fatal("Unable to Get container")
}


@@ -2,7 +2,6 @@ package docker
import (
"fmt"
"github.com/dotcloud/docker/future"
"sync"
"time"
)
@@ -17,18 +16,10 @@ type State struct {
stateChangeCond *sync.Cond
}
func newState() *State {
lock := new(sync.Mutex)
return &State{
stateChangeLock: lock,
stateChangeCond: sync.NewCond(lock),
}
}
// String returns a human-readable description of the state
func (s *State) String() string {
if s.Running {
return fmt.Sprintf("Up %s", future.HumanDuration(time.Now().Sub(s.StartedAt)))
return fmt.Sprintf("Up %s", HumanDuration(time.Now().Sub(s.StartedAt)))
}
return fmt.Sprintf("Exit %d", s.ExitCode)
}


@@ -52,13 +52,6 @@ func changeUser(u string) {
}
}
// Set the environment to a known, repeatable state
func setupEnv() {
os.Clearenv()
os.Setenv("HOME", "/")
os.Setenv("PATH", "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin")
}
func executeProgram(name string, args []string) {
path, err := exec.LookPath(name)
if err != nil {
@@ -86,6 +79,5 @@ func SysInit() {
setupNetworking(*gw)
changeUser(*u)
setupEnv()
executeProgram(flag.Arg(0), flag.Args())
}

184
tags.go Normal file

@@ -0,0 +1,184 @@
package docker
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
)
const DEFAULT_TAG = "latest"
type TagStore struct {
path string
graph *Graph
Repositories map[string]Repository
}
type Repository map[string]string
func NewTagStore(path string, graph *Graph) (*TagStore, error) {
abspath, err := filepath.Abs(path)
if err != nil {
return nil, err
}
store := &TagStore{
path: abspath,
graph: graph,
Repositories: make(map[string]Repository),
}
// Load the json file if it exists, otherwise create it.
if err := store.Reload(); os.IsNotExist(err) {
if err := store.Save(); err != nil {
return nil, err
}
} else if err != nil {
return nil, err
}
return store, nil
}
func (store *TagStore) Save() error {
// Store the json ball
jsonData, err := json.Marshal(store)
if err != nil {
return err
}
if err := ioutil.WriteFile(store.path, jsonData, 0600); err != nil {
return err
}
return nil
}
func (store *TagStore) Reload() error {
jsonData, err := ioutil.ReadFile(store.path)
if err != nil {
return err
}
if err := json.Unmarshal(jsonData, store); err != nil {
return err
}
return nil
}
func (store *TagStore) LookupImage(name string) (*Image, error) {
img, err := store.graph.Get(name)
if err != nil {
// FIXME: standardize on returning nil when the image doesn't exist, and err for everything else
// (so we can pass all errors here)
repoAndTag := strings.SplitN(name, ":", 2)
if len(repoAndTag) == 1 {
repoAndTag = append(repoAndTag, DEFAULT_TAG)
}
if i, err := store.GetImage(repoAndTag[0], repoAndTag[1]); err != nil {
return nil, err
} else if i == nil {
return nil, fmt.Errorf("No such image: %s", name)
} else {
img = i
}
}
return img, nil
}
// Return a reverse-lookup table of all the names which refer to each image
// Eg. {"43b5f19b10584": {"base:latest", "base:v1"}}
func (store *TagStore) ById() map[string][]string {
byId := make(map[string][]string)
for repoName, repository := range store.Repositories {
for tag, id := range repository {
name := repoName + ":" + tag
if _, exists := byId[id]; !exists {
byId[id] = []string{name}
} else {
byId[id] = append(byId[id], name)
}
}
}
return byId
}
func (store *TagStore) ImageName(id string) string {
if names, exists := store.ById()[id]; exists && len(names) > 0 {
return names[0]
}
return id
}
func (store *TagStore) Set(repoName, tag, imageName string, force bool) error {
img, err := store.LookupImage(imageName)
if err != nil {
return err
}
if tag == "" {
tag = DEFAULT_TAG
}
if err := validateRepoName(repoName); err != nil {
return err
}
if err := validateTagName(tag); err != nil {
return err
}
if err := store.Reload(); err != nil {
return err
}
var repo Repository
if r, exists := store.Repositories[repoName]; exists {
repo = r
if old, exists := repo[tag]; exists && !force {
return fmt.Errorf("Tag %s:%s is already set to %s", repoName, tag, old)
}
} else {
repo = make(map[string]string)
store.Repositories[repoName] = repo
}
repo[tag] = img.Id
return store.Save()
}
func (store *TagStore) Get(repoName string) (Repository, error) {
if err := store.Reload(); err != nil {
return nil, err
}
if r, exists := store.Repositories[repoName]; exists {
return r, nil
}
return nil, nil
}
func (store *TagStore) GetImage(repoName, tag string) (*Image, error) {
repo, err := store.Get(repoName)
if err != nil {
return nil, err
} else if repo == nil {
return nil, nil
}
if revision, exists := repo[tag]; exists {
return store.graph.Get(revision)
}
return nil, nil
}
// Validate the name of a repository
func validateRepoName(name string) error {
if name == "" {
return fmt.Errorf("Repository name can't be empty")
}
if strings.Contains(name, ":") {
return fmt.Errorf("Illegal repository name: %s", name)
}
return nil
}
// Validate the name of a tag
func validateTagName(name string) error {
if name == "" {
return fmt.Errorf("Tag name can't be empty")
}
if strings.Contains(name, "/") || strings.Contains(name, ":") {
return fmt.Errorf("Illegal tag name: %s", name)
}
return nil
}


@@ -114,27 +114,6 @@ func IsTerminal(fd int) bool {
return err == 0
}
// MakeRaw put the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd int) (*State, error) {
var oldState State
if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
return nil, err
}
newState := oldState.termios
newState.Iflag &^= ISTRIP | INLCR | IGNCR | IXON | IXOFF
newState.Iflag |= ICRNL
newState.Oflag |= ONLCR
newState.Lflag &^= ECHO | ICANON | ISIG
if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(setTermios), uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
return nil, err
}
return &oldState, nil
}
// Restore restores the terminal connected to the given file descriptor to a
// previous state.
func Restore(fd int, state *State) error {


@@ -1,8 +1,32 @@
package term
import "syscall"
import (
"syscall"
"unsafe"
)
const (
getTermios = syscall.TIOCGETA
setTermios = syscall.TIOCSETA
)
// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd int) (*State, error) {
var oldState State
if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
return nil, err
}
newState := oldState.termios
newState.Iflag &^= ISTRIP | INLCR | IGNCR | IXON | IXOFF
newState.Iflag |= ICRNL
newState.Oflag |= ONLCR
newState.Lflag &^= ECHO | ICANON | ISIG
if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(setTermios), uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
return nil, err
}
return &oldState, nil
}


@@ -1,8 +1,32 @@
package term
import "syscall"
import (
"syscall"
"unsafe"
)
const (
getTermios = syscall.TCGETS
setTermios = syscall.TCSETS
)
// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd int) (*State, error) {
var oldState State
if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
return nil, err
}
newState := oldState.termios
newState.Iflag &^= ISTRIP | IXON | IXOFF
newState.Iflag |= ICRNL
newState.Oflag |= ONLCR
newState.Lflag &^= ECHO | ICANON | ISIG
if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(setTermios), uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
return nil, err
}
return &oldState, nil
}
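
A brief sketch of how MakeRaw is meant to be paired with Restore and IsTerminal from the shared term file edited above (the function name is illustrative):

// Illustrative sketch, assuming it lives in the term package.
package term

import "os"

func exampleRawMode() error {
    fd := int(os.Stdin.Fd())
    if !IsTerminal(fd) {
        return nil // nothing to do when stdin is not a terminal
    }
    oldState, err := MakeRaw(fd)
    if err != nil {
        return err
    }
    // Always put the terminal back into its previous state.
    defer Restore(fd, oldState)
    // ... interactive I/O would happen here ...
    return nil
}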

101
utils.go

@@ -3,13 +3,114 @@ package docker
import (
"bytes"
"container/list"
"errors"
"fmt"
"github.com/dotcloud/docker/rcli"
"io"
"log"
"net/http"
"os"
"os/exec"
"path/filepath"
"sync"
"time"
)
// Go is a basic promise implementation: it wraps a function call in a goroutine,
// and returns a channel which will later return the function's return value.
func Go(f func() error) chan error {
ch := make(chan error)
go func() {
ch <- f()
}()
return ch
}
// Request a given URL and return the *http.Response
func Download(url string, stderr io.Writer) (*http.Response, error) {
var resp *http.Response
var err error
if resp, err = http.Get(url); err != nil {
return nil, err
}
if resp.StatusCode >= 400 {
return nil, errors.New("Got HTTP status code >= 400: " + resp.Status)
}
return resp, nil
}
// Debug function: if the debug flag is set, display the message; do nothing otherwise.
// If Docker is in daemon mode, also send the debug info on the socket
func Debugf(format string, a ...interface{}) {
if rcli.DEBUG_FLAG {
log.Printf(format, a...)
if rcli.CLIENT_SOCKET != nil {
fmt.Fprintf(rcli.CLIENT_SOCKET, log.Prefix()+format, a...)
}
}
}
// Reader with progress bar
type progressReader struct {
reader io.ReadCloser // Stream to read from
output io.Writer // Where to send progress bar to
read_total int // Expected stream length (bytes)
read_progress int // How much has been read so far (bytes)
last_update int // How many bytes had been read at the last update
}
func (r *progressReader) Read(p []byte) (n int, err error) {
read, err := r.reader.Read(p)
r.read_progress += read
// Only update progress for every 1% read
update_every := int(0.01 * float64(r.read_total))
if r.read_progress-r.last_update > update_every || r.read_progress == r.read_total {
fmt.Fprintf(r.output, "%d/%d (%.0f%%)\r",
r.read_progress,
r.read_total,
float64(r.read_progress)/float64(r.read_total)*100)
r.last_update = r.read_progress
}
// Send newline when complete
if err == io.EOF {
fmt.Fprintf(r.output, "\n")
}
return read, err
}
func (r *progressReader) Close() error {
return r.reader.Close()
}
func ProgressReader(r io.ReadCloser, size int, output io.Writer) *progressReader {
return &progressReader{r, output, size, 0, 0}
}
// HumanDuration returns a human-readable approximation of a duration
// (eg. "About a minute", "4 hours ago", etc.)
func HumanDuration(d time.Duration) string {
if seconds := int(d.Seconds()); seconds < 1 {
return "Less than a second"
} else if seconds < 60 {
return fmt.Sprintf("%d seconds", seconds)
} else if minutes := int(d.Minutes()); minutes == 1 {
return "About a minute"
} else if minutes < 60 {
return fmt.Sprintf("%d minutes", minutes)
} else if hours := int(d.Hours()); hours == 1 {
return "About an hour"
} else if hours < 48 {
return fmt.Sprintf("%d hours", hours)
} else if hours < 24*7*2 {
return fmt.Sprintf("%d days", hours/24)
} else if hours < 24*30*3 {
return fmt.Sprintf("%d weeks", hours/24/7)
} else if hours < 24*365*2 {
return fmt.Sprintf("%d months", hours/24/30)
}
return fmt.Sprintf("%d years", d.Hours()/24/365)
}
func Trunc(s string, maxlen int) string {
if len(s) <= maxlen {
return s