Mirror of https://github.com/moby/moby.git (synced 2026-01-12 03:01:38 +00:00)

Compare commits: upstream/0 ... 0.0.3 (113 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 33c2f07fc7 |  |
|  | 1480bff3a9 |  |
|  | ac7fa37be3 |  |
|  | 065eca9d4e |  |
|  | a030c1bd24 |  |
|  | 4a1c40364c |  |
|  | 4015f8dd34 |  |
|  | 16f132b156 |  |
|  | b400d78b19 |  |
|  | e190c27d10 |  |
|  | da47420af6 |  |
|  | b55e461122 |  |
|  | c4640689af |  |
|  | 27ad71e025 |  |
|  | 0a35db8fd0 |  |
|  | c1c60a2835 |  |
|  | ad29305630 |  |
|  | bb5b7897a4 |  |
|  | 5d569a8741 |  |
|  | 878ae25980 |  |
|  | b2cf5041cd |  |
|  | 29210802dd |  |
|  | 54b44e3705 |  |
|  | 9ff6dd767a |  |
|  | 137af244ee |  |
|  | 030a33aa23 |  |
|  | 2839a59018 |  |
|  | 82188b9c2c |  |
|  | 4d80958b01 |  |
|  | cb7819cbc5 |  |
|  | 0e0d76b7ac |  |
|  | eab39dc3fa |  |
|  | 47a5a51d39 |  |
|  | 76bd475648 |  |
|  | 9598b28933 |  |
|  | d615496c4c |  |
|  | 3b9c97a5d5 |  |
|  | 7cc0a07524 |  |
|  | 98a4d38eb4 |  |
|  | 8c83484bf2 |  |
|  | 7ec6a311f8 |  |
|  | d94a5b7d05 |  |
|  | 9b94d89b06 |  |
|  | f1cf5074f5 |  |
|  | 18519f12ae |  |
|  | c0b9afbf01 |  |
|  | 5e79c4394a |  |
|  | be20f3c518 |  |
|  | c65c1738b5 |  |
|  | 7188dcd96d |  |
|  | 3cd34f1039 |  |
|  | fcc0af9f5b |  |
|  | de753d5a90 |  |
|  | e5e66716df |  |
|  | b97f9e8148 |  |
|  | cc18df61b9 |  |
|  | 681e452832 |  |
|  | a444327ab6 |  |
|  | 722d4c92c3 |  |
|  | 27512332d0 |  |
|  | 0b5e223b4e |  |
|  | 78ef1ba32f |  |
|  | e81d7132fa |  |
|  | c74408d7b8 |  |
|  | a1712ea6d3 |  |
|  | a216712f3c |  |
|  | 2b8c79d74c |  |
|  | 06553a756b |  |
|  | c78cbbd002 |  |
|  | 3686feaccc |  |
|  | d6c3d02205 |  |
|  | 09d96656a0 |  |
|  | e614690f07 |  |
|  | 3a6c2e48f9 |  |
|  | 0f952a7dbb |  |
|  | 1b34630b8c |  |
|  | 68e173ad50 |  |
|  | 8f81feb1f0 |  |
|  | ce20fbfff6 |  |
|  | f0a65207ab |  |
|  | fde01381d5 |  |
|  | d479efc01e |  |
|  | f3d826447d |  |
|  | f5f26a510f |  |
|  | aaaf56e62f |  |
|  | 804abddec1 |  |
|  | 8c2ff9374c |  |
|  | 89245360e8 |  |
|  | 8d5f683dc4 |  |
|  | d614e91b62 |  |
|  | baf6988d87 |  |
|  | fae8284b16 |  |
|  | 895e145e61 |  |
|  | 41e0c74fd0 |  |
|  | d54ff35f02 |  |
|  | c3622a963d |  |
|  | 77272bea9c |  |
|  | 7cc512c7c0 |  |
|  | 745edc49cd |  |
|  | 86854ffbc5 |  |
|  | 070bc0bb6d |  |
|  | e20a74d247 |  |
|  | b0265e0a38 |  |
|  | b5c1cd7991 |  |
|  | 2feefb4375 |  |
|  | 1bfd827701 |  |
|  | d2cba75d5f |  |
|  | 49554f47f6 |  |
|  | 453d49573c |  |
|  | 8e0986caec |  |
|  | b8219b5275 |  |
|  | 63edf8a4a1 |  |
|  | f6d64738d0 |  |
.gitignore (vendored, 4 changed lines)

@@ -1,6 +1,8 @@
.vagrant
docker/docker
dockerd/dockerd
.*.swp
a.out
*.orig
build_src
command-line-arguments.test
.flymake*
AUTHORS (new file, 13 lines)

@@ -0,0 +1,13 @@
Solomon Hykes
Sam Alba
Jérôme Petazzoni
Andrea Luzzardi
Joffrey Fuhrer
Louis Opter
Niall O'Higgins
Brian McCallister
Jeff Lindsay
Ken Cochrane
Charles Hooper
Guillaume Charmes
Daniel Mizyrycki
Makefile (95 changed lines)

@@ -1,23 +1,84 @@
GOPATH := $(PWD)/env
BUILDPATH := $(PWD)/build
PKG_NAME=dotcloud-docker
PKG_ARCH=amd64
PKG_VERSION=1
ROOT_PATH:=$(PWD)
BUILD_PATH=build # Do not change, decided by dpkg-buildpackage
BUILD_SRC=build_src
GITHUB_PATH=src/github.com/dotcloud/docker
INSDIR=usr/bin
SOURCE_PACKAGE=$(PKG_NAME)_$(PKG_VERSION).orig.tar.gz
DEB_PACKAGE=$(PKG_NAME)_$(PKG_VERSION)_$(PKG_ARCH).deb
EXTRA_GO_PKG=fs auth

all: docker.o dockerd.o
TMPDIR=$(shell mktemp -d -t XXXXXX)

env:
    mkdir -p ${BUILDPATH} ${GOPATH}/src/github.com/dotcloud/
    ln -s $(PWD) ${GOPATH}/src/github.com/dotcloud/docker

deps:
    GOPATH=${GOPATH} go get github.com/kr/pty
    GOPATH=${GOPATH} go get github.com/mattn/go-sqlite3
    GOPATH=${GOPATH} go get github.com/shykes/gorp
# Build a debian source package
all: build_in_deb

clean:
    go clean
    rm -rf env build
build_in_deb:
    echo "GOPATH = " $(ROOT_PATH)
    mkdir bin
    cd $(GITHUB_PATH)/docker; GOPATH=$(ROOT_PATH) go build -o $(ROOT_PATH)/bin/docker

docker.o: env deps
    GOPATH=${GOPATH} go build -o ${BUILDPATH}/docker docker/docker.go
# DESTDIR provided by Debian packaging
install:
    # Call this from a go environment (as packaged for deb source package)
    mkdir -p $(DESTDIR)/$(INSDIR)
    mkdir -p $(DESTDIR)/etc/init
    install -m 0755 bin/docker $(DESTDIR)/$(INSDIR)
    install -o root -m 0755 etc/docker.upstart $(DESTDIR)/etc/init/docker.conf

dockerd.o: env deps
    GOPATH=${GOPATH} go build -o ${BUILDPATH}/dockerd dockerd/dockerd.go
$(BUILD_SRC): cleanup
    # Copy ourselves into $BUILD_SRC to comply with unusual golang constraints
    tar --exclude=*.tar.gz --exclude=checkout.tgz -f checkout.tgz -cz *
    mkdir -p $(BUILD_SRC)/$(GITHUB_PATH)
    tar -f checkout.tgz -C $(BUILD_SRC)/$(GITHUB_PATH) -xz
    cd $(BUILD_SRC)/$(GITHUB_PATH)/docker; GOPATH=$(ROOT_PATH)/$(BUILD_SRC) go get -d
    for d in `find $(BUILD_SRC) -name '.git*'`; do rm -rf $$d; done
    # Populate source build with debian stuff
    cp -R -L ./deb/* $(BUILD_SRC)

$(SOURCE_PACKAGE): $(BUILD_SRC)
    rm -f $(SOURCE_PACKAGE)
    # Create the debian source package
    tar -f $(SOURCE_PACKAGE) -C ${ROOT_PATH}/${BUILD_SRC} -cz .

# Build deb package fetching go dependencies and cleaning up git repositories
deb: $(DEB_PACKAGE)

$(DEB_PACKAGE): $(SOURCE_PACKAGE)
    # dpkg-buildpackage looks for source package tarball in ../
    cd $(BUILD_SRC); dpkg-buildpackage
    rm -rf $(BUILD_PATH) debian/$(PKG_NAME)* debian/files

debsrc: $(SOURCE_PACKAGE)

# Build local sources
#$(PKG_NAME): build_local

build_local:
    -@mkdir -p bin
    cd docker && go build -o ../bin/docker

gotest:
    @echo "\033[36m[Testing]\033[00m docker..."
    @sudo -E GOPATH=$(ROOT_PATH)/$(BUILD_SRC) go test -v && \
        echo -n "\033[32m[OK]\033[00m" || \
        echo -n "\033[31m[FAIL]\033[00m"; \
        echo " docker"
    @echo "Testing extra repos {$(EXTRA_GO_PKG)}"
    @for package in $(EXTRA_GO_PKG); do \
        echo "\033[36m[Testing]\033[00m docker/$$package..." && \
        cd $$package ; \
        sudo -E GOPATH=$(ROOT_PATH)/$(BUILD_SRC) go test -v && \
        echo -n "\033[32m[OK]\033[00m" || \
        echo -n "\033[31m[FAIL]\033[00m" ; \
        echo " docker/$$package" ; \
        cd .. ;\
    done
    @sudo rm -rf /tmp/docker-*

cleanup:

    rm -rf $(BUILD_PATH) debian/$(PKG_NAME)* debian/files $(BUILD_SRC) checkout.tgz
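The packaging workflow implied by these targets can be driven directly from make. A minimal sketch, assuming a Debian/Ubuntu host with dpkg-buildpackage and the Build-Depends from deb/debian/control already installed (this usage note is not part of the diff itself):

```bash
# Hedged sketch of driving the Makefile targets shown above.
make debsrc    # stage the tree into build_src/ and create dotcloud-docker_1.orig.tar.gz
make deb       # run dpkg-buildpackage to produce dotcloud-docker_1_amd64.deb
make gotest    # run 'go test' for docker plus the EXTRA_GO_PKG packages (fs auth)
make cleanup   # remove build_src/, checkout.tgz and packaging leftovers
```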
README.md (188 changed lines)

@@ -1,7 +1,7 @@
Docker is a process manager with superpowers
============================================
Docker: the Linux container runtime
===================================

It encapsulates heterogeneous payloads in Standard Containers, and runs them on any server with strong guarantees of isolation and repeatability.
Docker complements LXC with a high-level API with operates at the process level. It runs unix processes with strong guarantees of isolation and repeatability across servers.

Is is a great building block for automating distributed systems: large-scale web deployments, database clusters, continuous deployment systems, private PaaS, service-oriented architectures, etc.

@@ -31,7 +31,7 @@ Notable features

* Change management: changes to a container's filesystem can be committed into a new image and re-used to create more containers. No templating or manual configuration required.

* Interactive shell: docker can allocate a pseudo-tty and attach to the standard input of any container, for example to run a throaway interactive shell.
* Interactive shell: docker can allocate a pseudo-tty and attach to the standard input of any container, for example to run a throwaway interactive shell.

@@ -50,11 +50,36 @@ Under the hood, Docker is built on the following components:

* [lxc](http://lxc.sourceforge.net/), a set of convenience scripts to simplify the creation of linux containers.

Setup instructions
Install instructions
==================

Requirements
------------
Installing on Ubuntu 12.04 and 12.10
------------------------------------

1. Install dependencies:

```bash
sudo apt-get install lxc wget bsdtar curl
```

2. Install the latest docker binary:

```bash
wget http://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-master.tgz
tar -xf docker-master.tgz
```

3. Run your first container!

```bash
cd docker-master
sudo ./docker run -a -i -t base /bin/bash
```

Consider adding docker to your `PATH` for simplicity.

Installing on other Linux distributions
---------------------------------------

Right now, the officially supported distributions are:

@@ -63,29 +88,146 @@ Right now, the officially supported distributions are:

Docker probably works on other distributions featuring a recent kernel, the AUFS patch, and up-to-date lxc. However this has not been tested.

Installing with Vagrant
-----------------------

Installation
---------------
Currently, Docker can be installed with Vagrant both on your localhost
with VirtualBox as well as on Amazon EC2. Vagrant 1.1 is required for
EC2, but deploying is as simple as:

1. Set up your host of choice on a physical / virtual machine
2. Assume root identity on your newly installed environment (`sudo -s`)
3. Type the following commands:

```bash
$ export AWS_ACCESS_KEY_ID=xxx \
    AWS_SECRET_ACCESS_KEY=xxx \
    AWS_KEYPAIR_NAME=xxx \
    AWS_SSH_PRIVKEY=xxx
$ vagrant plugin install vagrant-aws
$ vagrant up --provider=aws
```

apt-get update
apt-get install lxc wget bsdtar curl
The environment variables are:

* `AWS_ACCESS_KEY_ID` - The API key used to make requests to AWS
* `AWS_SECRET_ACCESS_KEY` - The secret key to make AWS API requests
* `AWS_KEYPAIR_NAME` - The name of the keypair used for this EC2 instance
* `AWS_SSH_PRIVKEY` - The path to the private key for the named keypair

For VirtualBox, you can simply ignore setting any of the environment
variables and omit the ``provider`` flag. VirtualBox is still supported with
Vagrant <= 1.1:

```bash
$ vagrant up
```

Usage examples
==============

Running an interactive shell
----------------------------

```bash
# Download a base image
docker import base

# Run an interactive shell in the base image,
# allocate a tty, attach stdin and stdout
docker run -a -i -t base /bin/bash
```

Starting a long-running worker process
--------------------------------------

```bash
# Run docker in daemon mode
(docker -d || echo "Docker daemon already running") &

# Start a very useful long-running process
JOB=$(docker run base /bin/sh -c "while true; do echo Hello world!; sleep 1; done")

# Collect the output of the job so far
docker logs $JOB

# Kill the job
docker kill $JOB
```

Listing all running containers
------------------------------

```bash
docker ps
```

Expose a service on a TCP port
------------------------------

```bash
# Expose port 4444 of this container, and tell netcat to listen on it
JOB=$(docker run -p 4444 base /bin/nc -l -p 4444)

# Which public port is NATed to my container?
PORT=$(docker port $JOB 4444)

# Connect to the public port via the host's public address
echo hello world | nc $(hostname) $PORT

# Verify that the network connection worked
echo "Daemon received: $(docker logs $JOB)"
```

Contributing to Docker
======================

Want to hack on Docker? Awesome! Here are instructions to get you started. They are probably not perfect, please let us know if anything feels wrong or incomplete.

Contribution guidelines
-----------------------

### Pull requests are always welcome

We are always thrilled to receive pull requests, and do our best to process them as fast as possible. Not sure if that typo is worth a pull request? Do it! We will appreciate it.

If your pull request is not accepted on the first try, don't be discouraged! If there's a problen with the implementation, hopefully you received feedback on what to improve.

We're trying very hard to keep Docker lean and focused. We don't want it to do everything for everybody. This means that we might decide against incorporating a new feature.
However there might be a way to implement that feature *on top of* docker.

### Discuss your design on the mailing list

We recommend discussing your plans [on the mailing list](https://groups.google.com/forum/?fromgroups#!forum/docker-club) before starting to code - especially for more ambitious contributions. This gives other contributors a chance to point
you in the right direction, give feedback on your design, and maybe point out if someone else is working on the same thing.

### Create issues...

Any significant improvement should be documented as a github issue before anybody start working on it.

### ...but check for existing issues first!

Please take a moment to check that an issue doesn't already exist documenting your bug report or improvement proposal.
If it does, it never hurts to add a quick "+1" or "I have this problem too". This will help prioritize the most common problems and requests.

### Write tests

Golang has a great testing suite built in: use it! Take a look at existing tests for inspiration.

Setting up a dev environment
----------------------------

Coming soon!

4. Download the latest docker binaries: `wget http://docker.io.s3.amazonaws.com/builds/$(uname -s)/$(uname -m)/docker-master.tgz` ([Or get the Linux/x86_64 binaries here](http://docker.io.s3.amazonaws.com/builds/Linux/x86_64/docker-master.tgz) )
5. Extract the contents of the tar file `tar -xf docker-master.tar.gz`
6. Launch the docker daemon in the background `./dockerd &`
7. Download a base image `./docker pull base`
8. Run your first container! `./docker run -i -a -t base /bin/bash`
9. Start exploring `./docker --help`

Consider adding docker and dockerd to your `PATH` for simplicity.

What is a Standard Container?
-----------------------------
=============================

Docker defines a unit of software delivery called a Standard Container. The goal of a Standard Container is to encapsulate a software component and all its dependencies in
a format that is self-describing and portable, so that any compliant runtime can run it without extra dependency, regardless of the underlying machine and the contents of the container.

@@ -101,7 +243,7 @@ Just like shipping containers, Standard Containers define a set of STANDARD OPER

### 2. CONTENT-AGNOSTIC

Just like shipping containers, Standard Containers are CONTENT-AGNOSTIC: all standard operations have the same effect regardless of the contents. A shipping container will be stacked in exactly the same way whether it contains Vietnamese powder coffe or spare Maserati parts. Similarly, Standard Containers are started or uploaded in the same way whether they contain a postgres database, a php application with its dependencies and application server, or Java build artifacts.
Just like shipping containers, Standard Containers are CONTENT-AGNOSTIC: all standard operations have the same effect regardless of the contents. A shipping container will be stacked in exactly the same way whether it contains Vietnamese powder coffee or spare Maserati parts. Similarly, Standard Containers are started or uploaded in the same way whether they contain a postgres database, a php application with its dependencies and application server, or Java build artifacts.

### 3. INFRASTRUCTURE-AGNOSTIC
Vagrantfile (vendored, 23 changed lines)

@@ -1,7 +1,7 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

Vagrant::Config.run do |config|
Vagrant.configure("1") do |config|
  # All Vagrant configuration is done here. The most common configuration
  # options are documented and commented below. For a complete reference,
  # please see the online documentation at vagrantup.com.
@@ -34,7 +34,7 @@ Vagrant::Config.run do |config|
  # Share an additional folder to the guest VM. The first argument is
  # an identifier, the second is the path on the guest to mount the
  # folder, and the third is the path on the host to the actual folder.
  # config.vm.share_folder "v-data", "/vagrant_data", "../data"
  config.vm.share_folder "v-data", "~/docker", "~/docker"

  # Enable provisioning with Puppet stand alone. Puppet manifests
  # are contained in a directory path relative to this Vagrantfile.
@@ -98,3 +98,22 @@ Vagrant::Config.run do |config|
  #
  # chef.validation_client_name = "ORGNAME-validator"
end

Vagrant.configure("2") do |config|
  config.vm.provider :aws do |aws|
    config.vm.box = "dummy"
    config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
    aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
    aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
    aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
    aws.ssh_private_key_path = ENV["AWS_SSH_PRIVKEY"]
    aws.region = "us-east-1"
    aws.ami = "ami-1c1e8075"
    aws.ssh_username = "vagrant"
    aws.instance_type = "t1.micro"
  end
  config.vm.provider :virtualbox do |vb|
    config.vm.box = "quantal64_3.5.0-25"
    config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box"
  end
end
auth/auth.go (new file, 151 lines)

@@ -0,0 +1,151 @@
|
||||
package auth
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Where we store the config file
|
||||
const CONFIGFILE = "/var/lib/docker/.dockercfg"
|
||||
|
||||
// the registry server we want to login against
|
||||
const REGISTRY_SERVER = "http://registry.docker.io"
|
||||
|
||||
type AuthConfig struct {
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
Email string `json:"email"`
|
||||
}
|
||||
|
||||
// create a base64 encoded auth string to store in config
|
||||
func EncodeAuth(authConfig AuthConfig) string {
|
||||
authStr := authConfig.Username + ":" + authConfig.Password
|
||||
msg := []byte(authStr)
|
||||
encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
|
||||
base64.StdEncoding.Encode(encoded, msg)
|
||||
return string(encoded)
|
||||
}
|
||||
|
||||
// decode the auth string
|
||||
func DecodeAuth(authStr string) (AuthConfig, error) {
|
||||
decLen := base64.StdEncoding.DecodedLen(len(authStr))
|
||||
decoded := make([]byte, decLen)
|
||||
authByte := []byte(authStr)
|
||||
n, err := base64.StdEncoding.Decode(decoded, authByte)
|
||||
if err != nil {
|
||||
return AuthConfig{}, err
|
||||
}
|
||||
if n > decLen {
|
||||
return AuthConfig{}, errors.New("something went wrong decoding auth config")
|
||||
}
|
||||
arr := strings.Split(string(decoded), ":")
|
||||
password := strings.Trim(arr[1], "\x00")
|
||||
return AuthConfig{Username: arr[0], Password: password}, nil
|
||||
|
||||
}
|
||||
|
||||
// load up the auth config information and return values
|
||||
func LoadConfig() (AuthConfig, error) {
|
||||
if _, err := os.Stat(CONFIGFILE); err == nil {
|
||||
b, err := ioutil.ReadFile(CONFIGFILE)
|
||||
if err != nil {
|
||||
return AuthConfig{}, err
|
||||
}
|
||||
arr := strings.Split(string(b), "\n")
|
||||
orig_auth := strings.Split(arr[0], " = ")
|
||||
orig_email := strings.Split(arr[1], " = ")
|
||||
authConfig, err := DecodeAuth(orig_auth[1])
|
||||
if err != nil {
|
||||
return AuthConfig{}, err
|
||||
}
|
||||
authConfig.Email = orig_email[1]
|
||||
return authConfig, nil
|
||||
} else {
|
||||
return AuthConfig{}, nil
|
||||
}
|
||||
return AuthConfig{}, nil
|
||||
}
|
||||
|
||||
// save the auth config
|
||||
func saveConfig(authStr string, email string) error {
|
||||
lines := "auth = " + authStr + "\n" + "email = " + email + "\n"
|
||||
b := []byte(lines)
|
||||
err := ioutil.WriteFile(CONFIGFILE, b, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// try to register/login to the registry server
|
||||
func Login(authConfig AuthConfig) (string, error) {
|
||||
storeConfig := false
|
||||
reqStatusCode := 0
|
||||
var status string
|
||||
var errMsg string
|
||||
var reqBody []byte
|
||||
jsonBody, err := json.Marshal(authConfig)
|
||||
if err != nil {
|
||||
errMsg = fmt.Sprintf("Config Error: %s", err)
|
||||
return "", errors.New(errMsg)
|
||||
}
|
||||
|
||||
b := strings.NewReader(string(jsonBody))
|
||||
req1, err := http.Post(REGISTRY_SERVER+"/v1/users", "application/json; charset=utf-8", b)
|
||||
if err != nil {
|
||||
errMsg = fmt.Sprintf("Server Error: %s", err)
|
||||
return "", errors.New(errMsg)
|
||||
}
|
||||
|
||||
reqStatusCode = req1.StatusCode
|
||||
defer req1.Body.Close()
|
||||
reqBody, err = ioutil.ReadAll(req1.Body)
|
||||
if err != nil {
|
||||
errMsg = fmt.Sprintf("Server Error: [%#v] %s", reqStatusCode, err)
|
||||
return "", errors.New(errMsg)
|
||||
}
|
||||
|
||||
if reqStatusCode == 201 {
|
||||
status = "Account Created\n"
|
||||
storeConfig = true
|
||||
} else if reqStatusCode == 400 {
|
||||
if string(reqBody) == "Username or email already exist" {
|
||||
client := &http.Client{}
|
||||
req, err := http.NewRequest("GET", REGISTRY_SERVER+"/v1/users", nil)
|
||||
req.SetBasicAuth(authConfig.Username, authConfig.Password)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if resp.StatusCode == 200 {
|
||||
status = "Login Succeeded\n"
|
||||
storeConfig = true
|
||||
} else {
|
||||
status = fmt.Sprintf("Login: %s", body)
|
||||
return "", errors.New(status)
|
||||
}
|
||||
} else {
|
||||
status = fmt.Sprintf("Registration: %s", string(reqBody))
|
||||
return "", errors.New(status)
|
||||
}
|
||||
} else {
|
||||
status = fmt.Sprintf("[%s] : %s", reqStatusCode, string(reqBody))
|
||||
return "", errors.New(status)
|
||||
}
|
||||
if storeConfig {
|
||||
authStr := EncodeAuth(authConfig)
|
||||
saveConfig(authStr, authConfig.Email)
|
||||
}
|
||||
return status, nil
|
||||
}
|
||||
auth/auth_test.go (new file, 23 lines)

@@ -0,0 +1,23 @@
package auth

import (
    "testing"
)

func TestEncodeAuth(t *testing.T) {
    newAuthConfig := AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"}
    authStr := EncodeAuth(newAuthConfig)
    decAuthConfig, err := DecodeAuth(authStr)
    if err != nil {
        t.Fatal(err)
    }
    if newAuthConfig.Username != decAuthConfig.Username {
        t.Fatal("Encode Username doesn't match decoded Username")
    }
    if newAuthConfig.Password != decAuthConfig.Password {
        t.Fatal("Encode Password doesn't match decoded Password")
    }
    if authStr != "a2VuOnRlc3Q=" {
        t.Fatal("AuthString encoding isn't correct.")
    }
}
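The same credentials also illustrate the on-disk format written by saveConfig in auth/auth.go: two `key = value` lines under /var/lib/docker/.dockercfg (the CONFIGFILE constant). A hypothetical example, reusing the values from TestEncodeAuth above, not taken from the diff itself:

```bash
# Hypothetical .dockercfg after a successful login with the test credentials
# ("a2VuOnRlc3Q=" is base64 of "ken:test").
$ cat /var/lib/docker/.dockercfg
auth = a2VuOnRlc3Q=
email = test@example.com
```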
client/client.go (deleted, 126 lines)

@@ -1,126 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"../future"
|
||||
"../rcli"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Run docker in "simple mode": run a single command and return.
|
||||
func SimpleMode(args []string) error {
|
||||
var oldState *State
|
||||
var err error
|
||||
if IsTerminal(0) && os.Getenv("NORAW") == "" {
|
||||
oldState, err = MakeRaw(0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer Restore(0, oldState)
|
||||
}
|
||||
// FIXME: we want to use unix sockets here, but net.UnixConn doesn't expose
|
||||
// CloseWrite(), which we need to cleanly signal that stdin is closed without
|
||||
// closing the connection.
|
||||
// See http://code.google.com/p/go/issues/detail?id=3345
|
||||
conn, err := rcli.Call("tcp", "127.0.0.1:4242", args...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
receive_stdout := future.Go(func() error {
|
||||
_, err := io.Copy(os.Stdout, conn)
|
||||
return err
|
||||
})
|
||||
send_stdin := future.Go(func() error {
|
||||
_, err := io.Copy(conn, os.Stdin)
|
||||
if err := conn.CloseWrite(); err != nil {
|
||||
log.Printf("Couldn't send EOF: " + err.Error())
|
||||
}
|
||||
return err
|
||||
})
|
||||
if err := <-receive_stdout; err != nil {
|
||||
return err
|
||||
}
|
||||
if oldState != nil {
|
||||
Restore(0, oldState)
|
||||
}
|
||||
if !IsTerminal(0) {
|
||||
if err := <-send_stdin; err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run docker in "interactive mode": run a bash-compatible shell capable of running docker commands.
|
||||
func InteractiveMode(scripts ...string) error {
|
||||
// Determine path of current docker binary
|
||||
dockerPath, err := exec.LookPath(os.Args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dockerPath, err = filepath.Abs(dockerPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create a temp directory
|
||||
tmp, err := ioutil.TempDir("", "docker-shell")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
// For each command, create an alias in temp directory
|
||||
// FIXME: generate this list dynamically with introspection of some sort
|
||||
// It might make sense to merge docker and dockerd to keep that introspection
|
||||
// within a single binary.
|
||||
for _, cmd := range []string{
|
||||
"help",
|
||||
"run",
|
||||
"ps",
|
||||
"pull",
|
||||
"put",
|
||||
"rm",
|
||||
"kill",
|
||||
"wait",
|
||||
"stop",
|
||||
"start",
|
||||
"restart",
|
||||
"logs",
|
||||
"diff",
|
||||
"commit",
|
||||
"attach",
|
||||
"info",
|
||||
"tar",
|
||||
"web",
|
||||
"images",
|
||||
"docker",
|
||||
} {
|
||||
if err := os.Symlink(dockerPath, path.Join(tmp, cmd)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Run $SHELL with PATH set to temp directory
|
||||
rcfile, err := ioutil.TempFile("", "docker-shell-rc")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.Remove(rcfile.Name())
|
||||
io.WriteString(rcfile, "enable -n help\n")
|
||||
os.Setenv("PATH", tmp+":"+os.Getenv("PATH"))
|
||||
os.Setenv("PS1", "\\h docker> ")
|
||||
shell := exec.Command("/bin/bash", append([]string{"--rcfile", rcfile.Name()}, scripts...)...)
|
||||
shell.Stdin = os.Stdin
|
||||
shell.Stdout = os.Stdout
|
||||
shell.Stderr = os.Stderr
|
||||
if err := shell.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,16 +1,17 @@
|
||||
package server
|
||||
package docker
|
||||
|
||||
import (
|
||||
".."
|
||||
"../fs"
|
||||
"../future"
|
||||
"../rcli"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/auth"
|
||||
"github.com/dotcloud/docker/fs"
|
||||
"github.com/dotcloud/docker/future"
|
||||
"github.com/dotcloud/docker/rcli"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
@@ -22,16 +23,7 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
const VERSION = "0.0.1"
|
||||
|
||||
func (srv *Server) ListenAndServe() error {
|
||||
go rcli.ListenAndServeHTTP("127.0.0.1:8080", srv)
|
||||
// FIXME: we want to use unix sockets here, but net.UnixConn doesn't expose
|
||||
// CloseWrite(), which we need to cleanly signal that stdin is closed without
|
||||
// closing the connection.
|
||||
// See http://code.google.com/p/go/issues/detail?id=3345
|
||||
return rcli.ListenAndServe("tcp", "127.0.0.1:4242", srv)
|
||||
}
|
||||
const VERSION = "0.0.3"
|
||||
|
||||
func (srv *Server) Name() string {
|
||||
return "docker"
|
||||
@@ -55,22 +47,22 @@ func (srv *Server) Help() string {
|
||||
{"inspect", "Return low-level information on a container"},
|
||||
{"kill", "Kill a running container"},
|
||||
{"layers", "(debug only) List filesystem layers"},
|
||||
{"login", "Register or Login to the docker registry server"},
|
||||
{"logs", "Fetch the logs of a container"},
|
||||
{"ls", "List the contents of a container's directory"},
|
||||
{"mirror", "(debug only) (No documentation available)"},
|
||||
{"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"},
|
||||
{"ps", "List containers"},
|
||||
{"pull", "Download a new image from a remote location"},
|
||||
{"put", "Import a new image from a local archive"},
|
||||
{"reset", "Reset changes to a container's filesystem"},
|
||||
{"restart", "Restart a running container"},
|
||||
{"rm", "Remove a container"},
|
||||
{"rmimage", "Remove an image"},
|
||||
{"rmi", "Remove an image"},
|
||||
{"run", "Run a command in a new container"},
|
||||
{"start", "Start a stopped container"},
|
||||
{"stop", "Stop a running container"},
|
||||
{"tar", "Stream the contents of a container as a tar archive"},
|
||||
{"umount", "(debug only) Mount a container's filesystem"},
|
||||
{"version", "Show the docker version information"},
|
||||
{"wait", "Block until a container stops, then print its exit code"},
|
||||
{"web", "A web UI for docker"},
|
||||
{"write", "Write the contents of standard input to a container's file"},
|
||||
@@ -80,6 +72,53 @@ func (srv *Server) Help() string {
|
||||
return help
|
||||
}
|
||||
|
||||
// 'docker login': login / register a user to registry service.
|
||||
func (srv *Server) CmdLogin(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
|
||||
cmd := rcli.Subcmd(stdout, "login", "", "Register or Login to the docker registry server")
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
return nil
|
||||
}
|
||||
var username string
|
||||
var password string
|
||||
var email string
|
||||
authConfig, err := auth.LoadConfig()
|
||||
if err != nil {
|
||||
fmt.Fprintf(stdout, "Error : %s\n", err)
|
||||
}
|
||||
|
||||
fmt.Fprint(stdout, "Username (", authConfig.Username, "): ")
|
||||
fmt.Fscanf(stdin, "%s", &username)
|
||||
if username == "" {
|
||||
username = authConfig.Username
|
||||
}
|
||||
if username != authConfig.Username {
|
||||
fmt.Fprint(stdout, "Password: ")
|
||||
fmt.Fscanf(stdin, "%s", &password)
|
||||
|
||||
if password == "" {
|
||||
return errors.New("Error : Password Required\n")
|
||||
}
|
||||
|
||||
fmt.Fprint(stdout, "Email (", authConfig.Email, "): ")
|
||||
fmt.Fscanf(stdin, "%s", &email)
|
||||
if email == "" {
|
||||
email = authConfig.Email
|
||||
}
|
||||
} else {
|
||||
password = authConfig.Password
|
||||
email = authConfig.Email
|
||||
}
|
||||
newAuthConfig := auth.AuthConfig{Username: username, Password: password, Email: email}
|
||||
status, err := auth.Login(newAuthConfig)
|
||||
if err != nil {
|
||||
fmt.Fprintf(stdout, "Error : %s\n", err)
|
||||
}
|
||||
if status != "" {
|
||||
fmt.Fprintf(stdout, status)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// 'docker wait': block until a container stops
|
||||
func (srv *Server) CmdWait(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
|
||||
cmd := rcli.Subcmd(stdout, "wait", "[OPTIONS] NAME", "Block until a container stops, then print its exit code.")
|
||||
@@ -100,6 +139,12 @@ func (srv *Server) CmdWait(stdin io.ReadCloser, stdout io.Writer, args ...string
|
||||
return nil
|
||||
}
|
||||
|
||||
// 'docker version': show version information
|
||||
func (srv *Server) CmdVersion(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
|
||||
fmt.Fprintf(stdout, "Version:%s\n", VERSION)
|
||||
return nil
|
||||
}
|
||||
|
||||
// 'docker info': display system-wide information.
|
||||
func (srv *Server) CmdInfo(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
|
||||
images, _ := srv.images.Images()
|
||||
@@ -312,10 +357,14 @@ func (srv *Server) CmdInspect(stdin io.ReadCloser, stdout io.Writer, args ...str
|
||||
var obj interface{}
|
||||
if container := srv.containers.Get(name); container != nil {
|
||||
obj = container
|
||||
//} else if image, err := srv.images.List(name); image != nil {
|
||||
// obj = image
|
||||
} else if image, err := srv.images.Find(name); err != nil {
|
||||
return err
|
||||
} else if image != nil {
|
||||
obj = image
|
||||
} else {
|
||||
return errors.New("No such container or image: " + name)
|
||||
// No output means the object does not exist
|
||||
// (easier to script since stdout and stderr are not differentiated atm)
|
||||
return nil
|
||||
}
|
||||
data, err := json.Marshal(obj)
|
||||
if err != nil {
|
||||
@@ -328,6 +377,7 @@ func (srv *Server) CmdInspect(stdin io.ReadCloser, stdout io.Writer, args ...str
|
||||
if _, err := io.Copy(stdout, indented); err != nil {
|
||||
return err
|
||||
}
|
||||
stdout.Write([]byte{'\n'})
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -355,34 +405,34 @@ func (srv *Server) CmdPort(stdin io.ReadCloser, stdout io.Writer, args ...string
|
||||
}
|
||||
|
||||
// 'docker rmi NAME' removes all images with the name NAME
|
||||
// func (srv *Server) CmdRmi(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
|
||||
// cmd := rcli.Subcmd(stdout, "rmimage", "[OPTIONS] IMAGE", "Remove an image")
|
||||
// fl_regexp := cmd.Bool("r", false, "Use IMAGE as a regular expression instead of an exact name")
|
||||
// if err := cmd.Parse(args); err != nil {
|
||||
// cmd.Usage()
|
||||
// return nil
|
||||
// }
|
||||
// if cmd.NArg() < 1 {
|
||||
// cmd.Usage()
|
||||
// return nil
|
||||
// }
|
||||
// for _, name := range cmd.Args() {
|
||||
// var err error
|
||||
// if *fl_regexp {
|
||||
// err = srv.images.DeleteMatch(name)
|
||||
// } else {
|
||||
// image := srv.images.Find(name)
|
||||
// if image == nil {
|
||||
// return errors.New("No such image: " + name)
|
||||
// }
|
||||
// err = srv.images.Delete(name)
|
||||
// }
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
// return nil
|
||||
// }
|
||||
func (srv *Server) CmdRmi(stdin io.ReadCloser, stdout io.Writer, args ...string) (err error) {
|
||||
cmd := rcli.Subcmd(stdout, "rmimage", "[OPTIONS] IMAGE", "Remove an image")
|
||||
fl_all := cmd.Bool("a", false, "Use IMAGE as a path and remove ALL images in this path")
|
||||
fl_regexp := cmd.Bool("r", false, "Use IMAGE as a regular expression instead of an exact name")
|
||||
if cmd.Parse(args) != nil || cmd.NArg() < 1 {
|
||||
cmd.Usage()
|
||||
return nil
|
||||
}
|
||||
for _, name := range cmd.Args() {
|
||||
if *fl_regexp {
|
||||
err = srv.images.RemoveRegexp(name)
|
||||
} else if *fl_all {
|
||||
err = srv.images.RemoveInPath(name)
|
||||
} else {
|
||||
if image, err1 := srv.images.Find(name); err1 != nil {
|
||||
err = err1
|
||||
} else if err1 == nil && image == nil {
|
||||
err = fmt.Errorf("No such image: %s", name)
|
||||
} else {
|
||||
err = srv.images.Remove(image)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (srv *Server) CmdRm(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
|
||||
cmd := rcli.Subcmd(stdout, "rm", "[OPTIONS] CONTAINER", "Remove a container")
|
||||
@@ -422,10 +472,12 @@ func (srv *Server) CmdKill(stdin io.ReadCloser, stdout io.Writer, args ...string
|
||||
func (srv *Server) CmdImport(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
|
||||
cmd := rcli.Subcmd(stdout, "import", "[OPTIONS] NAME", "Create a new filesystem image from the contents of a tarball")
|
||||
fl_stdin := cmd.Bool("stdin", false, "Read tarball from stdin")
|
||||
var archive io.Reader
|
||||
var resp *http.Response
|
||||
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
return nil
|
||||
}
|
||||
var archive io.Reader
|
||||
name := cmd.Arg(0)
|
||||
if name == "" {
|
||||
return errors.New("Not enough arguments")
|
||||
@@ -440,22 +492,18 @@ func (srv *Server) CmdImport(stdin io.ReadCloser, stdout io.Writer, args ...stri
|
||||
if u.Scheme == "" {
|
||||
u.Scheme = "http"
|
||||
}
|
||||
// FIXME: hardcode a mirror URL that does not depend on a single provider.
|
||||
if u.Host == "" {
|
||||
u.Host = "s3.amazonaws.com"
|
||||
u.Path = path.Join("/docker.io/images", u.Path)
|
||||
u.Host = "get.docker.io"
|
||||
u.Path = path.Join("/images", u.Path)
|
||||
}
|
||||
fmt.Fprintf(stdout, "Downloading from %s\n", u.String())
|
||||
// Download with curl (pretty progress bar)
|
||||
// If curl is not available, fallback to http.Get()
|
||||
archive, err = future.Curl(u.String(), stdout)
|
||||
resp, err = future.Download(u.String(), stdout)
|
||||
if err != nil {
|
||||
if resp, err := http.Get(u.String()); err != nil {
|
||||
return err
|
||||
} else {
|
||||
archive = resp.Body
|
||||
}
|
||||
return err
|
||||
}
|
||||
archive = future.ProgressReader(resp.Body, int(resp.ContentLength), stdout)
|
||||
}
|
||||
fmt.Fprintf(stdout, "Unpacking to %s\n", name)
|
||||
img, err := srv.images.Create(archive, nil, name, "")
|
||||
@@ -548,7 +596,7 @@ func (srv *Server) CmdPs(stdin io.ReadCloser, stdout io.Writer, args ...string)
|
||||
if !*quiet {
|
||||
command := fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " "))
|
||||
if !*fl_full {
|
||||
command = docker.Trunc(command, 20)
|
||||
command = Trunc(command, 20)
|
||||
}
|
||||
for idx, field := range []string{
|
||||
/* ID */ container.Id,
|
||||
@@ -739,10 +787,10 @@ func (srv *Server) CmdLogs(stdin io.ReadCloser, stdout io.Writer, args ...string
|
||||
return errors.New("No such container: " + cmd.Arg(0))
|
||||
}
|
||||
|
||||
func (srv *Server) CreateContainer(img *fs.Image, ports []int, user string, tty bool, openStdin bool, memory int64, comment string, cmd string, args ...string) (*docker.Container, error) {
|
||||
func (srv *Server) CreateContainer(img *fs.Image, ports []int, user string, tty bool, openStdin bool, memory int64, comment string, cmd string, args ...string) (*Container, error) {
|
||||
id := future.RandomId()[:8]
|
||||
container, err := srv.containers.Create(id, cmd, args, img,
|
||||
&docker.Config{
|
||||
&Config{
|
||||
Hostname: id,
|
||||
Ports: ports,
|
||||
User: user,
|
||||
@@ -835,12 +883,16 @@ func (srv *Server) CmdRun(stdin io.ReadCloser, stdout io.Writer, args ...string)
|
||||
fl_comment := cmd.String("c", "", "Comment")
|
||||
fl_memory := cmd.Int64("m", 0, "Memory limit (in bytes)")
|
||||
var fl_ports ports
|
||||
|
||||
cmd.Var(&fl_ports, "p", "Map a network port to the container")
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
return nil
|
||||
}
|
||||
name := cmd.Arg(0)
|
||||
var img_name string
|
||||
//var img_version string // Only here for reference
|
||||
var cmdline []string
|
||||
|
||||
if len(cmd.Args()) >= 2 {
|
||||
cmdline = cmd.Args()[1:]
|
||||
}
|
||||
@@ -848,6 +900,7 @@ func (srv *Server) CmdRun(stdin io.ReadCloser, stdout io.Writer, args ...string)
|
||||
if name == "" {
|
||||
name = "base"
|
||||
}
|
||||
|
||||
// Choose a default command if needed
|
||||
if len(cmdline) == 0 {
|
||||
*fl_stdin = true
|
||||
@@ -855,13 +908,31 @@ func (srv *Server) CmdRun(stdin io.ReadCloser, stdout io.Writer, args ...string)
|
||||
*fl_attach = true
|
||||
cmdline = []string{"/bin/bash", "-i"}
|
||||
}
|
||||
|
||||
// Find the image
|
||||
img, err := srv.images.Find(name)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if img == nil {
|
||||
return errors.New("No such image: " + name)
|
||||
// Separate the name:version tag
|
||||
if strings.Contains(name, ":") {
|
||||
parts := strings.SplitN(name, ":", 2)
|
||||
img_name = parts[0]
|
||||
//img_version = parts[1] // Only here for reference
|
||||
} else {
|
||||
img_name = name
|
||||
}
|
||||
|
||||
stdin_noclose := ioutil.NopCloser(stdin)
|
||||
if err := srv.CmdImport(stdin_noclose, stdout, img_name); err != nil {
|
||||
return err
|
||||
}
|
||||
img, err = srv.images.Find(name)
|
||||
if err != nil || img == nil {
|
||||
return errors.New("Could not find image after downloading: " + name)
|
||||
}
|
||||
}
|
||||
|
||||
// Create new container
|
||||
container, err := srv.CreateContainer(img, fl_ports, *fl_user, *fl_tty,
|
||||
*fl_stdin, *fl_memory, *fl_comment, cmdline[0], cmdline[1:]...)
|
||||
@@ -920,12 +991,12 @@ func (srv *Server) CmdRun(stdin io.ReadCloser, stdout io.Writer, args ...string)
|
||||
return nil
|
||||
}
|
||||
|
||||
func New() (*Server, error) {
|
||||
func NewServer() (*Server, error) {
|
||||
future.Seed()
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
containers, err := docker.New()
|
||||
containers, err := New()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -976,6 +1047,6 @@ func (srv *Server) CmdWeb(stdin io.ReadCloser, stdout io.Writer, args ...string)
|
||||
}
|
||||
|
||||
type Server struct {
|
||||
containers *docker.Docker
|
||||
containers *Docker
|
||||
images *fs.Store
|
||||
}
|
||||
@@ -1,9 +1,9 @@
package docker

import (
    "./fs"
    "encoding/json"
    "errors"
    "github.com/dotcloud/docker/fs"
    "github.com/kr/pty"
    "io"
    "io/ioutil"
@@ -1,9 +1,9 @@
package docker

import (
    "./fs"
    "bufio"
    "fmt"
    "github.com/dotcloud/docker/fs"
    "io"
    "io/ioutil"
    "math/rand"
contrib/README (new file, 4 lines)

@@ -0,0 +1,4 @@
The `contrib` directory contains scripts, images, and other helpful things
which are not part of the core docker distribution. Please note that they
could be out of date, since they do not receive the same attention as the
rest of the repository.
install.sh → contrib/install.sh (moved, now executable, 4 changed lines)

@@ -38,14 +38,14 @@ fi
echo "Downloading docker binary and uncompressing into /usr/local/bin..."
curl -s http://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-master.tgz |
tar -C /usr/local/bin --strip-components=1 -zxf- \
    docker-master/docker docker-master/dockerd
    docker-master/docker

if [ -f /etc/init/dockerd.conf ]
then
    echo "Upstart script already exists."
else
    echo "Creating /etc/init/dockerd.conf..."
    echo "exec /usr/local/bin/dockerd" > /etc/init/dockerd.conf
    echo "exec /usr/local/bin/docker -d" > /etc/init/dockerd.conf
fi

echo "Starting dockerd..."
contrib/mkimage-busybox.sh (new executable file, 40 lines)

@@ -0,0 +1,40 @@
#!/bin/bash
# Generate a very minimal filesystem based on busybox-static,
# and load it into the local docker under the name "busybox".

BUSYBOX=$(which busybox)
[ "$BUSYBOX" ] || {
    echo "Sorry, I could not locate busybox."
    echo "Try 'apt-get install busybox-static'?"
    exit 1
}

set -e
ROOTFS=/tmp/rootfs-busybox-$$-$RANDOM
mkdir $ROOTFS
cd $ROOTFS

mkdir bin etc dev dev/pts lib proc sys tmp
touch etc/resolv.conf
cp /etc/nsswitch.conf etc/nsswitch.conf
echo root:x:0:0:root:/:/bin/sh > etc/passwd
echo root:x:0: > etc/group
ln -s lib lib64
ln -s bin sbin
cp $BUSYBOX bin
for X in $(busybox --list)
do
    ln -s busybox bin/$X
done
rm bin/init
ln bin/busybox bin/init
cp /lib/x86_64-linux-gnu/lib{pthread,c,dl,nsl,nss_*}.so.* lib
cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib
for X in console null ptmx random stdin stdout stderr tty urandom zero
do
    cp -a /dev/$X dev
done

tar -cf- . | docker put busybox
docker run -i -a -u root busybox /bin/echo Success.
deb/Makefile (new symbolic link)

@@ -0,0 +1 @@
../Makefile
deb/README.md (new symbolic link)

@@ -0,0 +1 @@
../README.md
deb/debian/changelog (new file, 5 lines)

@@ -0,0 +1,5 @@
dotcloud-docker (1) precise; urgency=low

  * Initial release

 -- dotCloud <ops@dotcloud.com>  Mon, 14 Mar 2013 04:43:21 -0700
deb/debian/compat (new file, 1 line)

@@ -0,0 +1 @@
8
deb/debian/control (new file, 20 lines)

@@ -0,0 +1,20 @@
Source: dotcloud-docker
Section: misc
Priority: extra
Homepage: https://github.com/dotcloud/docker
Maintainer: Daniel Mizyrycki <daniel@dotcloud.com>
Build-Depends: debhelper (>= 8.0.0), pkg-config, git, golang, libsqlite3-dev
Vcs-Git: https://github.com/dotcloud/docker.git
Standards-Version: 3.9.2

Package: dotcloud-docker
Architecture: amd64
Provides: dotcloud-docker
Depends: lxc, wget, bsdtar, curl, sqlite3
Conflicts: docker
Description: A process manager with superpowers
 It encapsulates heterogeneous payloads in Standard Containers, and runs
 them on any server with strong guarantees of isolation and repeatability.
 Is is a great building block for automating distributed systems:
 large-scale web deployments, database clusters, continuous deployment
 systems, private PaaS, service-oriented architectures, etc.
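Once a package has been produced by the Makefile's deb target, a plausible way to install it (an assumption for illustration, not shown anywhere in the diff) is with dpkg, letting apt resolve the Depends listed above:

```bash
# Hedged sketch: installing the built package; the filename follows the
# PKG_NAME, PKG_VERSION and PKG_ARCH variables defined in the Makefile.
sudo dpkg -i dotcloud-docker_1_amd64.deb || sudo apt-get -f install
```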
deb/debian/copyright (new file, 209 lines)

@@ -0,0 +1,209 @@
|
||||
Format: http://dep.debian.net/deps/dep5
|
||||
Upstream-Name: dotcloud-docker
|
||||
Source: https://github.com/dotcloud/docker
|
||||
|
||||
Files: *
|
||||
Copyright: 2012 DotCloud Inc (opensource@dotcloud.com)
|
||||
License: Apache License Version 2.0
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2012 DotCloud Inc (opensource@dotcloud.com)
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
1
deb/debian/docs
Normal file
@@ -0,0 +1 @@
|
||||
README.md
|
||||
13
deb/debian/rules
Executable file
@@ -0,0 +1,13 @@
|
||||
#!/usr/bin/make -f
|
||||
# -*- makefile -*-
|
||||
# Sample debian/rules that uses debhelper.
|
||||
# This file was originally written by Joey Hess and Craig Small.
|
||||
# As a special exception, when this file is copied by dh-make into a
|
||||
# dh-make output file, you may use that output file without restriction.
|
||||
# This special exception was added by Craig Small in version 0.37 of dh-make.
|
||||
|
||||
# Uncomment this to turn on verbose mode.
|
||||
#export DH_VERBOSE=1
|
||||
|
||||
%:
|
||||
dh $@
|
||||
1
deb/debian/source/format
Normal file
@@ -0,0 +1 @@
|
||||
3.0 (quilt)
|
||||
10
deb/etc/docker-dev.upstart
Normal file
@@ -0,0 +1,10 @@
|
||||
description "Run docker"
|
||||
|
||||
start on runlevel [2345]
|
||||
stop on starting rc RUNLEVEL=[016]
|
||||
respawn
|
||||
|
||||
script
|
||||
test -f /etc/default/locale && . /etc/default/locale || true
|
||||
LANG=$LANG LC_ALL=$LANG /usr/bin/docker -d
|
||||
end script
|
||||
10
deb/etc/docker.upstart
Normal file
@@ -0,0 +1,10 @@
|
||||
description "Run docker"
|
||||
|
||||
start on runlevel [2345]
|
||||
stop on starting rc RUNLEVEL=[016]
|
||||
respawn
|
||||
|
||||
script
|
||||
test -f /etc/default/locale && . /etc/default/locale || true
|
||||
LANG=$LANG LC_ALL=$LANG /usr/bin/docker -d
|
||||
end script
|
||||
25
docker.go
@@ -1,9 +1,9 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"./fs"
|
||||
"container/list"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/fs"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
@@ -74,15 +74,14 @@ func (docker *Docker) Destroy(container *Container) error {
|
||||
}
|
||||
if container.Mountpoint.Mounted() {
|
||||
if err := container.Mountpoint.Umount(); err != nil {
|
||||
log.Printf("Unable to umount container %v: %v", container.Id, err)
|
||||
}
|
||||
|
||||
if err := container.Mountpoint.Deregister(); err != nil {
|
||||
log.Printf("Unable to deregiser mountpoint %v: %v", container.Mountpoint.Root, err)
|
||||
return fmt.Errorf("Unable to umount container %v: %v", container.Id, err)
|
||||
}
|
||||
}
|
||||
if err := container.Mountpoint.Deregister(); err != nil {
|
||||
return fmt.Errorf("Unable to deregiser -- ? mountpoint %v: %v", container.Mountpoint.Root, err)
|
||||
}
|
||||
if err := os.RemoveAll(container.Root); err != nil {
|
||||
log.Printf("Unable to remove filesystem for %v: %v", container.Id, err)
|
||||
return fmt.Errorf("Unable to remove filesystem for %v: %v", container.Id, err)
|
||||
}
|
||||
docker.containers.Remove(element)
|
||||
return nil
|
||||
@@ -109,6 +108,12 @@ func New() (*Docker, error) {
|
||||
}
|
||||
|
||||
func NewFromDirectory(root string) (*Docker, error) {
|
||||
docker_repo := path.Join(root, "containers")
|
||||
|
||||
if err := os.MkdirAll(docker_repo, 0700); err != nil && !os.IsExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
store, err := fs.New(path.Join(root, "images"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -120,16 +125,12 @@ func NewFromDirectory(root string) (*Docker, error) {
|
||||
|
||||
docker := &Docker{
|
||||
root: root,
|
||||
repository: path.Join(root, "containers"),
|
||||
repository: docker_repo,
|
||||
containers: list.New(),
|
||||
Store: store,
|
||||
networkManager: netManager,
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(docker.repository, 0700); err != nil && !os.IsExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := docker.restore(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -1,29 +1,92 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"../client"
|
||||
"flag"
|
||||
"github.com/dotcloud/docker"
|
||||
"github.com/dotcloud/docker/future"
|
||||
"github.com/dotcloud/docker/rcli"
|
||||
"github.com/dotcloud/docker/term"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if cmd := path.Base(os.Args[0]); cmd == "docker" {
|
||||
fl_shell := flag.Bool("i", false, "Interactive mode")
|
||||
flag.Parse()
|
||||
if *fl_shell {
|
||||
if err := client.InteractiveMode(flag.Args()...); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
if err := client.SimpleMode(os.Args[1:]); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if docker.SelfPath() == "/sbin/init" {
|
||||
// Running in init mode
|
||||
docker.SysInit()
|
||||
return
|
||||
}
|
||||
fl_daemon := flag.Bool("d", false, "Daemon mode")
|
||||
flag.Parse()
|
||||
if *fl_daemon {
|
||||
if flag.NArg() != 0 {
|
||||
flag.Usage()
|
||||
return
|
||||
}
|
||||
if err := daemon(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
if err := client.SimpleMode(append([]string{cmd}, os.Args[1:]...)); err != nil {
|
||||
if err := runCommand(flag.Args()); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func daemon() error {
|
||||
service, err := docker.NewServer()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return rcli.ListenAndServe("tcp", "127.0.0.1:4242", service)
|
||||
}
|
||||
|
||||
func runCommand(args []string) error {
|
||||
var oldState *term.State
|
||||
var err error
|
||||
if term.IsTerminal(0) && os.Getenv("NORAW") == "" {
|
||||
oldState, err = term.MakeRaw(0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer term.Restore(0, oldState)
|
||||
}
|
||||
// FIXME: we want to use unix sockets here, but net.UnixConn doesn't expose
|
||||
// CloseWrite(), which we need to cleanly signal that stdin is closed without
|
||||
// closing the connection.
|
||||
// See http://code.google.com/p/go/issues/detail?id=3345
|
||||
if conn, err := rcli.Call("tcp", "127.0.0.1:4242", args...); err == nil {
|
||||
receive_stdout := future.Go(func() error {
|
||||
_, err := io.Copy(os.Stdout, conn)
|
||||
return err
|
||||
})
|
||||
send_stdin := future.Go(func() error {
|
||||
_, err := io.Copy(conn, os.Stdin)
|
||||
if err := conn.CloseWrite(); err != nil {
|
||||
log.Printf("Couldn't send EOF: " + err.Error())
|
||||
}
|
||||
return err
|
||||
})
|
||||
if err := <-receive_stdout; err != nil {
|
||||
return err
|
||||
}
|
||||
if !term.IsTerminal(0) {
|
||||
if err := <-send_stdin; err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
service, err := docker.NewServer()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := rcli.LocalCall(service, os.Stdin, os.Stdout, args...); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if oldState != nil {
|
||||
term.Restore(0, oldState)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
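The FIXME above is the reason the client dials TCP rather than a unix socket: it needs to half-close the connection so the daemon sees EOF on stdin while the response is still streaming back. A minimal standalone sketch of that half-close pattern, assuming a daemon listening on 127.0.0.1:4242 as in the code above (illustrative only, not part of this changeset):

package main

import (
	"io"
	"log"
	"net"
	"os"
)

func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:4242")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// Send stdin, then half-close the write side so the server reads EOF...
	if _, err := io.Copy(conn, os.Stdin); err != nil {
		log.Fatal(err)
	}
	if tcp, ok := conn.(*net.TCPConn); ok {
		if err := tcp.CloseWrite(); err != nil {
			log.Printf("Couldn't send EOF: %v", err)
		}
	}
	// ...while this end keeps reading the reply on the same connection.
	if _, err := io.Copy(os.Stdout, conn); err != nil {
		log.Fatal(err)
	}
}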
@@ -1,20 +1,32 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"./fs"
|
||||
"github.com/dotcloud/docker/fs"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const testLayerPath string = "/var/lib/docker/docker-ut.tar"
|
||||
const unitTestImageName string = "busybox"
|
||||
|
||||
var unitTestStoreBase string
|
||||
var srv *Server
|
||||
|
||||
func nuke(docker *Docker) error {
|
||||
return os.RemoveAll(docker.root)
|
||||
}
|
||||
|
||||
func CopyDirectory(source, dest string) error {
|
||||
if _, err := exec.Command("cp", "-ra", source, dest).Output(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func layerArchive(tarfile string) (io.Reader, error) {
|
||||
// FIXME: need to close f somewhere
|
||||
f, err := os.Open(tarfile)
|
||||
@@ -28,15 +40,35 @@ func init() {
|
||||
// Hack to run sys init during unit testing
|
||||
if SelfPath() == "/sbin/init" {
|
||||
SysInit()
|
||||
return
|
||||
}
|
||||
|
||||
// Make sure the unit test image is there
|
||||
if _, err := os.Stat(testLayerPath); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
panic(err)
|
||||
}
|
||||
log.Fatalf("Unit test base image not found. Please fix the problem by running \"debootstrap --arch=amd64 quantal %v\"", testLayerPath)
|
||||
return
|
||||
if usr, err := user.Current(); err != nil {
|
||||
panic(err)
|
||||
} else if usr.Uid != "0" {
|
||||
panic("docker tests needs to be run as root")
|
||||
}
|
||||
|
||||
// Create a temp directory
|
||||
root, err := ioutil.TempDir("", "docker-test")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
unitTestStoreBase = root
|
||||
|
||||
// Make it our Store root
|
||||
docker, err := NewFromDirectory(root)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// Create the "Server"
|
||||
srv := &Server{
|
||||
images: docker.Store,
|
||||
containers: docker,
|
||||
}
|
||||
// Retrieve the Image
|
||||
if err := srv.CmdImport(os.Stdin, os.Stdout, unitTestImageName); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -45,19 +77,19 @@ func newTestDocker() (*Docker, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := os.Remove(root); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := CopyDirectory(unitTestStoreBase, root); err != nil {
|
||||
panic(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
docker, err := NewFromDirectory(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if layer, err := layerArchive(testLayerPath); err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
_, err = docker.Store.Create(layer, nil, "docker-ut", "unit tests")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
return docker, nil
|
||||
}
|
||||
|
||||
@@ -231,25 +263,22 @@ func TestGet(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRestore(t *testing.T) {
|
||||
|
||||
root, err := ioutil.TempDir("", "docker-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.Remove(root); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := CopyDirectory(unitTestStoreBase, root); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
docker1, err := NewFromDirectory(root)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer nuke(docker1)
|
||||
|
||||
if layer, err := layerArchive(testLayerPath); err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
_, err = docker1.Store.Create(layer, nil, "docker-ut", "unit tests")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create a container with one instance of docker
|
||||
container1, err := docker1.Create(
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
".."
|
||||
"../server"
|
||||
"flag"
|
||||
"log"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if docker.SelfPath() == "/sbin/init" {
|
||||
// Running in init mode
|
||||
docker.SysInit()
|
||||
return
|
||||
}
|
||||
flag.Parse()
|
||||
d, err := server.New()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := d.ListenAndServe(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
@@ -1,73 +0,0 @@
|
||||
#!/usr/bin/env docker -i
|
||||
|
||||
# Uncomment to debug:
|
||||
#set -x
|
||||
|
||||
export NORAW=1
|
||||
|
||||
IMG=shykes/pybuilder:11d4f58638a72935
|
||||
|
||||
if [ $# -lt 3 ]; then
|
||||
echo "Usage: $0 build|run USER/REPO REV"
|
||||
echo "Example usage:"
|
||||
echo ""
|
||||
echo " REV=7d5f035432fe1453eea389b0f1b02a2a93c8009e"
|
||||
echo " $0 build shykes/helloflask \$REV"
|
||||
echo " $0 run shykes/helloflask \$REV"
|
||||
echo ""
|
||||
exit 1
|
||||
fi
|
||||
|
||||
CMD=$1
|
||||
|
||||
FORCE=0
|
||||
if [ "$2" = "-f" ]; then
|
||||
FORCE=1
|
||||
shift
|
||||
fi
|
||||
|
||||
REPO=$2
|
||||
REV=$3
|
||||
|
||||
BUILD_IMAGE=builds/github.com/$REPO/$REV
|
||||
|
||||
|
||||
if [ "$CMD" = "build" ]; then
|
||||
if [ ! -z "`images -q $BUILD_IMAGE`" ]; then
|
||||
if [ "$FORCE" -ne 1 ]; then
|
||||
echo "$BUILD_IMAGE already exists"
|
||||
exit
|
||||
fi
|
||||
fi
|
||||
|
||||
# Allocate a TTY to work around python's aggressive buffering of stdout
|
||||
BUILD_JOB=`run -t $IMG /usr/local/bin/buildapp http://github.com/$REPO/archive/$REV.tar.gz`
|
||||
|
||||
if [ -z "$BUILD_JOB" ]; then
|
||||
echo "Build failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if attach $BUILD_JOB ; then
|
||||
BUILD_STATUS=`docker wait $BUILD_JOB`
|
||||
if [ -z "$BUILD_STATUS" -o "$BUILD_STATUS" != 0 ]; then
|
||||
echo "Build failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
else
|
||||
echo "Build failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
commit $BUILD_JOB $BUILD_IMAGE
|
||||
|
||||
echo "Build saved at $BUILD_IMAGE"
|
||||
elif [ "$CMD" = "run" ]; then
|
||||
RUN_JOB=`run $BUILD_IMAGE /usr/local/bin/runapp`
|
||||
if [ -z "$RUN_JOB" ]; then
|
||||
echo "Run failed"
|
||||
exit 1
|
||||
fi
|
||||
attach $RUN_JOB
|
||||
fi
|
||||
25
fs/layers.go
@@ -1,10 +1,9 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"../future"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"github.com/dotcloud/docker/future"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
@@ -88,34 +87,14 @@ func (store *LayerStore) AddLayer(id string, archive Archive) (string, error) {
|
||||
if _, err := os.Stat(store.layerPath(id)); err == nil {
|
||||
return "", fmt.Errorf("Layer already exists: %v", id)
|
||||
}
|
||||
errors := make(chan error)
|
||||
// Untar
|
||||
tmp, err := store.Mktemp()
|
||||
defer os.RemoveAll(tmp)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Mktemp failed: %s", err)
|
||||
}
|
||||
|
||||
untarR, untarW := io.Pipe()
|
||||
go func() {
|
||||
errors <- Untar(untarR, tmp)
|
||||
}()
|
||||
_, err = io.Copy(untarW, archive)
|
||||
untarW.Close()
|
||||
if err != nil {
|
||||
if err := Untar(archive, tmp); err != nil {
|
||||
return "", err
|
||||
}
|
||||
// Wait for goroutines
|
||||
for i := 0; i < 1; i += 1 {
|
||||
select {
|
||||
case err := <-errors:
|
||||
{
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
layer := store.layerPath(id)
|
||||
if !store.Exists(id) {
|
||||
if err := os.Rename(tmp, layer); err != nil {
|
||||
|
||||
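The AddLayer() change above drops the io.Pipe/goroutine plumbing and calls Untar() directly on the archive, still extracting into a Mktemp() directory before renaming it into place. A simplified standalone sketch of that extract-then-rename flow, using plain tar instead of the repository's bsdtar (hypothetical addLayer helper, not the actual store code):

package main

import (
	"fmt"
	"io"
	"log"
	"os"
	"os/exec"
	"path/filepath"
)

// addLayer untars the archive into a temporary directory first and only then
// renames it to its final path, so a partially extracted layer never becomes
// visible under the layers root.
func addLayer(archive io.Reader, layersRoot, id string) (string, error) {
	tmp, err := os.MkdirTemp(layersRoot, "tmp-layer-")
	if err != nil {
		return "", err
	}
	defer os.RemoveAll(tmp) // no-op once the rename has succeeded

	cmd := exec.Command("tar", "-x", "-C", tmp)
	cmd.Stdin = archive
	if out, err := cmd.CombinedOutput(); err != nil {
		return "", fmt.Errorf("untar failed: %v: %s", err, out)
	}

	layer := filepath.Join(layersRoot, id)
	if err := os.Rename(tmp, layer); err != nil {
		return "", err
	}
	return layer, nil
}

func main() {
	layer, err := addLayer(os.Stdin, os.TempDir(), "example-layer")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("layer extracted to", layer)
}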
@@ -1,7 +1,7 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"../fake"
|
||||
"github.com/dotcloud/docker/fake"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
223
fs/remove_test.go
Normal file
@@ -0,0 +1,223 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/fake"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func countImages(store *Store) int {
|
||||
paths, err := store.Images()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return len(paths)
|
||||
}
|
||||
|
||||
func TestRemoveInPath(t *testing.T) {
|
||||
store, err := TempStore("test-remove-in-path")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer nuke(store)
|
||||
archive, err := fake.FakeTar()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c := countImages(store); c != 0 {
|
||||
t.Fatalf("Expected 0 images, %d found", c)
|
||||
}
|
||||
|
||||
// Test 10 create / Delete all
|
||||
for i := 0; i < 10; i++ {
|
||||
if _, err := store.Create(archive, nil, "foo", "Testing"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
if c := countImages(store); c != 10 {
|
||||
t.Fatalf("Expected 10 images, %d found", c)
|
||||
}
|
||||
if err := store.RemoveInPath("foo"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c := countImages(store); c != 0 {
|
||||
t.Fatalf("Expected 0 images, %d found", c)
|
||||
}
|
||||
|
||||
// Test 10 create / Delete 1
|
||||
for i := 0; i < 10; i++ {
|
||||
if _, err := store.Create(archive, nil, fmt.Sprintf("foo-%d", i), "Testing"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
if c := countImages(store); c != 10 {
|
||||
t.Fatalf("Expected 10 images, %d found", c)
|
||||
}
|
||||
if err := store.RemoveInPath("foo-0"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c := countImages(store); c != 9 {
|
||||
t.Fatalf("Expected 9 images, %d found", c)
|
||||
}
|
||||
|
||||
// Delete failure
|
||||
if err := store.RemoveInPath("Not_Foo"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c := countImages(store); c != 9 {
|
||||
t.Fatalf("Expected 9 images, %d found", c)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemove(t *testing.T) {
|
||||
store, err := TempStore("test-remove")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer nuke(store)
|
||||
archive, err := fake.FakeTar()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c := countImages(store); c != 0 {
|
||||
t.Fatalf("Expected 0 images, %d found", c)
|
||||
}
|
||||
|
||||
// Test 1 create / 1 delete
|
||||
img, err := store.Create(archive, nil, "foo", "Testing")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c := countImages(store); c != 1 {
|
||||
t.Fatalf("Expected 1 images, %d found", c)
|
||||
}
|
||||
if err := store.Remove(img); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c := countImages(store); c != 0 {
|
||||
t.Fatalf("Expected 0 images, %d found", c)
|
||||
}
|
||||
|
||||
// Test 2 create (same name) / 1 delete
|
||||
img1, err := store.Create(archive, nil, "foo", "Testing")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
img2, err := store.Create(archive, nil, "foo", "Testing")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c := countImages(store); c != 2 {
|
||||
t.Fatalf("Expected 2 images, %d found", c)
|
||||
}
|
||||
if err := store.Remove(img1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c := countImages(store); c != 1 {
|
||||
t.Fatalf("Expected 1 images, %d found", c)
|
||||
}
|
||||
|
||||
// Test delete wrong name
|
||||
// Note: If we change orm and Delete of non existing return error, we will need to change this test
|
||||
if err := store.Remove(&Image{Id: "Not_foo", store: img2.store}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c := countImages(store); c != 1 {
|
||||
t.Fatalf("Expected 1 images, %d found", c)
|
||||
}
|
||||
|
||||
// Test delete last one
|
||||
if err := store.Remove(img2); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c := countImages(store); c != 0 {
|
||||
t.Fatalf("Expected 0 images, %d found", c)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveRegexp(t *testing.T) {
|
||||
store, err := TempStore("test-remove-regexp")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer nuke(store)
|
||||
archive, err := fake.FakeTar()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c := countImages(store); c != 0 {
|
||||
t.Fatalf("Expected 0 images, %d found", c)
|
||||
}
|
||||
|
||||
// Test 10 create with different names / Delete all good regexp
|
||||
for i := 0; i < 10; i++ {
|
||||
if _, err := store.Create(archive, nil, fmt.Sprintf("foo-%d", i), "Testing"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
if c := countImages(store); c != 10 {
|
||||
t.Fatalf("Expected 10 images, %d found", c)
|
||||
}
|
||||
if err := store.RemoveRegexp("foo"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c := countImages(store); c != 0 {
|
||||
t.Fatalf("Expected 0 images, %d found", c)
|
||||
}
|
||||
|
||||
// Test 10 create with different names / Delete all good regexp globing
|
||||
for i := 0; i < 10; i++ {
|
||||
if _, err := store.Create(archive, nil, fmt.Sprintf("foo-%d", i), "Testing"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
if c := countImages(store); c != 10 {
|
||||
t.Fatalf("Expected 10 images, %d found", c)
|
||||
}
|
||||
if err := store.RemoveRegexp("foo-*"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c := countImages(store); c != 0 {
|
||||
t.Fatalf("Expected 0 images, %d found", c)
|
||||
}
|
||||
|
||||
// Test 10 create with different names / Delete all bad regexp
|
||||
for i := 0; i < 10; i++ {
|
||||
if _, err := store.Create(archive, nil, fmt.Sprintf("foo-%d", i), "Testing"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
if c := countImages(store); c != 10 {
|
||||
t.Fatalf("Expected 10 images, %d found", c)
|
||||
}
|
||||
if err := store.RemoveRegexp("oo-*"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c := countImages(store); c != 0 {
|
||||
t.Fatalf("Expected 0 images, %d found", c)
|
||||
}
|
||||
|
||||
// Test 10 create with different names / Delete none strict regexp
|
||||
for i := 0; i < 10; i++ {
|
||||
if _, err := store.Create(archive, nil, fmt.Sprintf("foo-%d", i), "Testing"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
if c := countImages(store); c != 10 {
|
||||
t.Fatalf("Expected 10 images, %d found", c)
|
||||
}
|
||||
if err := store.RemoveRegexp("^oo-"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c := countImages(store); c != 10 {
|
||||
t.Fatalf("Expected 10 images, %d found", c)
|
||||
}
|
||||
|
||||
// Test delete 2
|
||||
if err := store.RemoveRegexp("^foo-[1,2]$"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c := countImages(store); c != 8 {
|
||||
t.Fatalf("Expected 8 images, %d found", c)
|
||||
}
|
||||
}
|
||||
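One detail worth noting about the tests above: RemoveRegexp() uses regexp.MatchString, which is unanchored, so "oo-*" still matches every "foo-N" image while "^oo-" matches none of them. A standalone illustration (example values only, not part of the changeset):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same patterns the tests exercise, checked against one image name.
	for _, pattern := range []string{"foo", "foo-*", "oo-*", "^oo-", "^foo-[1,2]$"} {
		match, err := regexp.MatchString(pattern, "foo-1")
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-12q matches %q: %v\n", pattern, "foo-1", match)
	}
}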
141
fs/store.go
@@ -1,10 +1,9 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"../future"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/future"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
"github.com/shykes/gorp" //Forked to implement CreateTablesOpts
|
||||
"io"
|
||||
@@ -12,6 +11,8 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
@@ -92,9 +93,48 @@ func (store *Store) Paths() ([]string, error) {
|
||||
return paths, nil
|
||||
}
|
||||
|
||||
func (store *Store) RemoveInPath(pth string) error {
|
||||
images, err := store.List(pth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, img := range images {
|
||||
if err = store.Remove(img); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteMatch deletes all images whose name matches `pattern`
|
||||
func (store *Store) RemoveRegexp(pattern string) error {
|
||||
// Retrieve all the paths
|
||||
paths, err := store.Paths()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Check the pattern on each elements
|
||||
for _, pth := range paths {
|
||||
if match, err := regexp.MatchString(pattern, pth); err != nil {
|
||||
return err
|
||||
} else if match {
|
||||
// If there is a match, remove it
|
||||
if err := store.RemoveInPath(pth); err != nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (store *Store) Remove(img *Image) error {
|
||||
_, err := store.orm.Delete(img)
|
||||
return err
|
||||
}
|
||||
|
||||
func (store *Store) List(pth string) ([]*Image, error) {
|
||||
pth = path.Clean(pth)
|
||||
images, err := store.orm.Select(Image{}, "select images.* from images, paths where Path=? and paths.Image=images.Id", pth)
|
||||
images, err := store.orm.Select(Image{}, "select images.* from images, paths where Path=? and paths.Image=images.Id order by images.Created desc", pth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -110,7 +150,19 @@ func (store *Store) Find(pth string) (*Image, error) {
|
||||
return img, nil
|
||||
}
|
||||
|
||||
images, err := store.orm.Select(Image{}, "select images.* from images, paths where Path=? and paths.Image=images.Id order by images.Created desc limit 1", pth)
|
||||
var q string
|
||||
var args []interface{}
|
||||
// FIXME: this breaks if the path contains a ':'
|
||||
// If format is path:rev
|
||||
if parts := strings.SplitN(pth, ":", 2); len(parts) == 2 {
|
||||
q = "select Images.* from images, paths where Path=? and images.Id=? and paths.Image=images.Id"
|
||||
args = []interface{}{parts[0], parts[1]}
|
||||
// If format is path:rev
|
||||
} else {
|
||||
q = "select images.* from images, paths where Path=? and paths.Image=images.Id order by images.Created desc limit 1"
|
||||
args = []interface{}{parts[0]}
|
||||
}
|
||||
images, err := store.orm.Select(Image{}, q, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if len(images) < 1 {
|
||||
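Find() now accepts either a plain path or a "path:revision" reference, as the strings.SplitN call above shows; the FIXME notes the parsing breaks when the path itself contains a ':'. A tiny standalone sketch of that split (hypothetical helper, illustrative values taken from the builder script elsewhere in this changeset):

package main

import (
	"fmt"
	"strings"
)

// splitPathRev separates "path:revision" into its two parts; a reference with
// no ':' is treated as a bare path with an empty revision.
func splitPathRev(pth string) (string, string) {
	if parts := strings.SplitN(pth, ":", 2); len(parts) == 2 {
		return parts[0], parts[1]
	}
	return pth, ""
}

func main() {
	fmt.Println(splitPathRev("shykes/pybuilder:11d4f58638a72935"))
	fmt.Println(splitPathRev("busybox"))
}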
@@ -142,42 +194,35 @@ func (store *Store) Create(layerData Archive, parent *Image, pth, comment string
|
||||
if parent != nil {
|
||||
img.Parent = parent.Id
|
||||
}
|
||||
// FIXME: we shouldn't have to pass os.Stderr to AddLayer()...
|
||||
// FIXME: Archive should contain compression info. For now we only support uncompressed.
|
||||
err := store.Register(layerData, img, pth)
|
||||
return img, err
|
||||
}
|
||||
|
||||
func (store *Store) Register(layerData Archive, img *Image, pth string) error {
|
||||
img.store = store
|
||||
_, err := store.layers.AddLayer(img.Id, layerData)
|
||||
if err != nil {
|
||||
return nil, errors.New(fmt.Sprintf("Could not add layer: %s", err))
|
||||
return fmt.Errorf("Could not add layer: %s", err)
|
||||
}
|
||||
path := &Path{
|
||||
pathObj := &Path{
|
||||
Path: path.Clean(pth),
|
||||
Image: img.Id,
|
||||
}
|
||||
trans, err := store.orm.Begin()
|
||||
if err != nil {
|
||||
return nil, errors.New(fmt.Sprintf("Could not begin transaction:", err))
|
||||
return fmt.Errorf("Could not begin transaction: %s", err)
|
||||
}
|
||||
if err := trans.Insert(img); err != nil {
|
||||
return nil, errors.New(fmt.Sprintf("Could not insert image info: %s", err))
|
||||
return fmt.Errorf("Could not insert image info: %s", err)
|
||||
}
|
||||
if err := trans.Insert(path); err != nil {
|
||||
return nil, errors.New(fmt.Sprintf("Could not insert path info: %s", err))
|
||||
if err := trans.Insert(pathObj); err != nil {
|
||||
return fmt.Errorf("Could not insert path info: %s", err)
|
||||
}
|
||||
if err := trans.Commit(); err != nil {
|
||||
return nil, errors.New(fmt.Sprintf("Could not commit transaction: %s", err))
|
||||
return fmt.Errorf("Could not commit transaction: %s", err)
|
||||
}
|
||||
return img, nil
|
||||
}
|
||||
|
||||
func (store *Store) Register(image *Image, pth string) error {
|
||||
image.store = store
|
||||
// FIXME: import layer
|
||||
trans, err := store.orm.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
trans.Insert(image)
|
||||
trans.Insert(&Path{Path: pth, Image: image.Id})
|
||||
return trans.Commit()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (store *Store) Layers() []string {
|
||||
@@ -234,6 +279,9 @@ func (image *Image) layers() ([]string, error) {
|
||||
return list, fmt.Errorf("Error while getting parent image: %v", err)
|
||||
}
|
||||
}
|
||||
if len(list) == 0 {
|
||||
return nil, fmt.Errorf("No layer found for image %s\n", image.Id)
|
||||
}
|
||||
return list, nil
|
||||
}
|
||||
|
||||
@@ -258,7 +306,7 @@ func (image *Image) Mount(root, rw string) (*Mountpoint, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Could not create mountpoint: %s", err)
|
||||
} else if mountpoint == nil {
|
||||
return nil, errors.New("No mountpoint created")
|
||||
return nil, fmt.Errorf("No mountpoint created")
|
||||
}
|
||||
} else {
|
||||
mountpoint = mp
|
||||
@@ -283,9 +331,38 @@ func (image *Image) Mount(root, rw string) (*Mountpoint, error) {
|
||||
return mountpoint, err
|
||||
}
|
||||
if !mountpoint.Mounted() {
|
||||
return mountpoint, errors.New("Mount failed")
|
||||
return mountpoint, fmt.Errorf("Mount failed")
|
||||
}
|
||||
|
||||
// FIXME: Create tests for deletion
|
||||
// FIXME: move this part to change.go, maybe refactor
|
||||
// fs.Change() to avoid the fake mountpoint
|
||||
// Retrieve the changeset from the parent and apply it to the container
|
||||
// - Retrieve the changes
|
||||
changes, err := image.store.Changes(&Mountpoint{
|
||||
Image: image.Id,
|
||||
Root: layers[0],
|
||||
Rw: layers[0],
|
||||
Store: image.store})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Iterate on changes
|
||||
for _, c := range changes {
|
||||
// If there is a delete
|
||||
if c.Kind == ChangeDelete {
|
||||
// Make sure the directory exists
|
||||
file_path, file_name := path.Dir(c.Path), path.Base(c.Path)
|
||||
if err := os.MkdirAll(path.Join(mountpoint.Rw, file_path), 0755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// And create the whiteout (we just need to create empty file, discard the return)
|
||||
if _, err := os.Create(path.Join(path.Join(mountpoint.Rw, file_path),
|
||||
".wh."+path.Base(file_name))); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return mountpoint, nil
|
||||
}
|
||||
|
||||
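The ChangeDelete branch above records a file deleted in the parent image by writing an empty ".wh.<name>" whiteout file into the read-write layer, which is how the layered filesystem hides the underlying entry. A small standalone sketch of that whiteout convention (hypothetical helper, not the repository's code):

package main

import (
	"fmt"
	"os"
	"path"
)

// whiteout records the deletion of deletedPath by dropping an empty
// ".wh.<name>" marker into the read-write layer, mirroring the ChangeDelete
// loop in the hunk above.
func whiteout(rwLayer, deletedPath string) error {
	dir, name := path.Dir(deletedPath), path.Base(deletedPath)
	if err := os.MkdirAll(path.Join(rwLayer, dir), 0755); err != nil {
		return err
	}
	f, err := os.Create(path.Join(rwLayer, dir, ".wh."+name))
	if err != nil {
		return err
	}
	return f.Close()
}

func main() {
	rw, err := os.MkdirTemp("", "rw-layer")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(rw)
	if err := whiteout(rw, "etc/foo.conf"); err != nil {
		panic(err)
	}
	fmt.Println("whiteout written under", rw)
}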
@@ -332,7 +409,7 @@ func (mp *Mountpoint) Mounted() bool {
|
||||
|
||||
func (mp *Mountpoint) Umount() error {
|
||||
if !mp.Mounted() {
|
||||
return errors.New("Mountpoint doesn't seem to be mounted")
|
||||
return fmt.Errorf("Mountpoint doesn't seem to be mounted")
|
||||
}
|
||||
if err := syscall.Unmount(mp.Root, 0); err != nil {
|
||||
return fmt.Errorf("Unmount syscall failed: %v", err)
|
||||
@@ -361,7 +438,7 @@ func (mp *Mountpoint) Umount() error {
|
||||
|
||||
func (mp *Mountpoint) Deregister() error {
|
||||
if mp.Mounted() {
|
||||
return errors.New("Mountpoint is currently mounted, can't deregister")
|
||||
return fmt.Errorf("Mountpoint is currently mounted, can't deregister")
|
||||
}
|
||||
|
||||
_, err := mp.Store.orm.Delete(mp)
|
||||
@@ -402,7 +479,7 @@ func (store *Store) AddTag(imageId, tagName string) error {
|
||||
if image, err := store.Get(imageId); err != nil {
|
||||
return err
|
||||
} else if image == nil {
|
||||
return errors.New("No image with ID " + imageId)
|
||||
return fmt.Errorf("No image with ID %s", imageId)
|
||||
}
|
||||
|
||||
err2 := store.orm.Insert(&Tag{
|
||||
@@ -418,7 +495,7 @@ func (store *Store) GetByTag(tagName string) (*Image, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if res == nil {
|
||||
return nil, errors.New("No image associated to tag \"" + tagName + "\"")
|
||||
return nil, fmt.Errorf("No image associated to tag \"%s\"", tagName)
|
||||
}
|
||||
|
||||
tag := res.(*Tag)
|
||||
@@ -427,7 +504,7 @@ func (store *Store) GetByTag(tagName string) (*Image, error) {
|
||||
if err2 != nil {
|
||||
return nil, err2
|
||||
} else if img == nil {
|
||||
return nil, errors.New("Tag was found but image seems to be inexistent.")
|
||||
return nil, fmt.Errorf("Tag was found but image seems to be inexistent.")
|
||||
}
|
||||
|
||||
return img, nil
|
||||
|
||||
104
fs/store_test.go
@@ -1,14 +1,17 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"../fake"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/fake"
|
||||
"github.com/dotcloud/docker/future"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// FIXME: Remove the Fake package
|
||||
|
||||
func TestInit(t *testing.T) {
|
||||
store, err := TempStore("testinit")
|
||||
if err != nil {
|
||||
@@ -24,6 +27,8 @@ func TestInit(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// FIXME: Do more extensive tests (ex: create multiple, delete, recreate;
|
||||
// create multiple, check the amount of images and paths, etc..)
|
||||
func TestCreate(t *testing.T) {
|
||||
store, err := TempStore("testcreate")
|
||||
if err != nil {
|
||||
@@ -52,6 +57,40 @@ func TestCreate(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestRegister(t *testing.T) {
|
||||
store, err := TempStore("testregister")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer nuke(store)
|
||||
archive, err := fake.FakeTar()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
image := &Image{
|
||||
Id: future.RandomId(),
|
||||
Comment: "testing",
|
||||
Created: time.Now().Unix(),
|
||||
store: store,
|
||||
}
|
||||
err = store.Register(archive, image, "foo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if images, err := store.Images(); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if l := len(images); l != 1 {
|
||||
t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l)
|
||||
}
|
||||
if images, err := store.List("foo"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if l := len(images); l != 1 {
|
||||
t.Fatalf("Path foo has wrong number of images (should be %d, not %d)", 1, l)
|
||||
} else if images[0].Id != image.Id {
|
||||
t.Fatalf("Imported image should be listed at path foo (%s != %s)", images[0], image)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTag(t *testing.T) {
|
||||
store, err := TempStore("testtag")
|
||||
if err != nil {
|
||||
@@ -193,63 +232,6 @@ func TestMountpointDuplicateRoot(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestMount(t *testing.T) {
|
||||
store, err := TempStore("test-mount")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer nuke(store)
|
||||
archive, err := fake.FakeTar()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
image, err := store.Create(archive, nil, "foo", "Testing")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Create mount targets
|
||||
root, err := ioutil.TempDir("", "docker-fs-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rw, err := ioutil.TempDir("", "docker-fs-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mountpoint, err := image.Mount(root, rw)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer mountpoint.Umount()
|
||||
// Mountpoint should be marked as mounted
|
||||
if !mountpoint.Mounted() {
|
||||
t.Fatal("Mountpoint not mounted")
|
||||
}
|
||||
// There should be one mountpoint registered
|
||||
if mps, err := image.Mountpoints(); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if len(mps) != 1 {
|
||||
t.Fatal("Wrong number of mountpoints registered (should be %d, not %d)", 1, len(mps))
|
||||
}
|
||||
// Unmounting should work
|
||||
if err := mountpoint.Umount(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// De-registering should work
|
||||
if err := mountpoint.Deregister(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if mps, err := image.Mountpoints(); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if len(mps) != 0 {
|
||||
t.Fatal("Wrong number of mountpoints registered (should be %d, not %d)", 0, len(mps))
|
||||
}
|
||||
// General health check
|
||||
if err := healthCheck(store); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TempStore(prefix string) (*Store, error) {
|
||||
dir, err := ioutil.TempDir("", "docker-fs-test-"+prefix)
|
||||
if err != nil {
|
||||
@@ -278,7 +260,7 @@ func healthCheck(store *Store) error {
|
||||
for _, img := range images {
|
||||
// Check for duplicate IDs per path
|
||||
if _, exists := IDs[img.Id]; exists {
|
||||
return errors.New(fmt.Sprintf("Duplicate ID: %s", img.Id))
|
||||
return fmt.Errorf("Duplicate ID: %s", img.Id)
|
||||
} else {
|
||||
IDs[img.Id] = true
|
||||
}
|
||||
@@ -291,7 +273,7 @@ func healthCheck(store *Store) error {
|
||||
// Check non-existing parents
|
||||
for parent := range parents {
|
||||
if _, exists := parents[parent]; !exists {
|
||||
return errors.New("Reference to non-registered parent: " + parent)
|
||||
return fmt.Errorf("Reference to non-registered parent: %s", parent)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -3,10 +3,11 @@ package future
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"os/exec"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
@@ -85,18 +86,51 @@ func Pv(src io.Reader, info io.Writer) io.Reader {
|
||||
return r
|
||||
}
|
||||
|
||||
// Curl makes an http request by executing the unix command 'curl', and returns
|
||||
// the body of the response. If `stderr` is not nil, a progress bar will be
|
||||
// written to it.
|
||||
func Curl(url string, stderr io.Writer) (io.Reader, error) {
|
||||
curl := exec.Command("curl", "-#", "-L", url)
|
||||
output, err := curl.StdoutPipe()
|
||||
if err != nil {
|
||||
// Request a given URL and return an io.Reader
|
||||
func Download(url string, stderr io.Writer) (*http.Response, error) {
|
||||
var resp *http.Response
|
||||
var err error = nil
|
||||
if resp, err = http.Get(url); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
curl.Stderr = stderr
|
||||
if err := curl.Start(); err != nil {
|
||||
return nil, err
|
||||
if resp.StatusCode >= 400 {
|
||||
return nil, errors.New("Got HTTP status code >= 400: " + resp.Status)
|
||||
}
|
||||
return output, nil
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Reader with progress bar
|
||||
type progressReader struct {
|
||||
reader io.ReadCloser // Stream to read from
|
||||
output io.Writer // Where to send progress bar to
|
||||
read_total int // Expected stream length (bytes)
|
||||
read_progress int // How much has been read so far (bytes)
|
||||
last_update int // How many bytes read at last update
|
||||
}
|
||||
|
||||
func (r *progressReader) Read(p []byte) (n int, err error) {
|
||||
read, err := io.ReadCloser(r.reader).Read(p)
|
||||
r.read_progress += read
|
||||
|
||||
// Only update progress for every 1% read
|
||||
update_every := int(0.01 * float64(r.read_total))
|
||||
if r.read_progress-r.last_update > update_every || r.read_progress == r.read_total {
|
||||
fmt.Fprintf(r.output, "%d/%d (%.0f%%)\r",
|
||||
r.read_progress,
|
||||
r.read_total,
|
||||
float64(r.read_progress)/float64(r.read_total)*100)
|
||||
r.last_update = r.read_progress
|
||||
}
|
||||
// Send newline when complete
|
||||
if err == io.EOF {
|
||||
fmt.Fprintf(r.output, "\n")
|
||||
}
|
||||
|
||||
return read, err
|
||||
}
|
||||
func (r *progressReader) Close() error {
|
||||
return io.ReadCloser(r.reader).Close()
|
||||
}
|
||||
func ProgressReader(r io.ReadCloser, size int, output io.Writer) *progressReader {
|
||||
return &progressReader{r, output, size, 0, 0}
|
||||
}
|
||||
|
||||
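Download() replaces the external curl invocation with net/http, and progressReader wraps any io.ReadCloser so that copying through it prints a progress line roughly every 1% of the expected size. A standalone usage sketch of the same wrap-and-copy idea (simplified re-implementation with an illustrative URL, not the repository's types):

package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

// progress mirrors the progressReader above in miniature: it counts the bytes
// that pass through Read and reports them on stderr.
type progress struct {
	r     io.Reader
	total int64 // may be -1 when the server sends no Content-Length
	read  int64
	out   io.Writer
}

func (p *progress) Read(buf []byte) (int, error) {
	n, err := p.r.Read(buf)
	p.read += int64(n)
	fmt.Fprintf(p.out, "%d/%d\r", p.read, p.total)
	if err == io.EOF {
		fmt.Fprintln(p.out)
	}
	return n, err
}

func main() {
	resp, err := http.Get("https://example.com/") // illustrative URL
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	src := &progress{r: resp.Body, total: resp.ContentLength, out: os.Stderr}
	if _, err := io.Copy(io.Discard, src); err != nil {
		panic(err)
	}
}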
@@ -1,73 +0,0 @@
|
||||
package image
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
type Compression uint32
|
||||
|
||||
const (
|
||||
Uncompressed Compression = iota
|
||||
Bzip2
|
||||
Gzip
|
||||
)
|
||||
|
||||
func (compression *Compression) Flag() string {
|
||||
switch *compression {
|
||||
case Bzip2:
|
||||
return "j"
|
||||
case Gzip:
|
||||
return "z"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func Tar(path string, compression Compression) (io.Reader, error) {
|
||||
cmd := exec.Command("bsdtar", "-f", "-", "-C", path, "-c"+compression.Flag(), ".")
|
||||
return CmdStream(cmd)
|
||||
}
|
||||
|
||||
func Untar(archive io.Reader, path string) error {
|
||||
cmd := exec.Command("bsdtar", "-f", "-", "-C", path, "-x")
|
||||
cmd.Stdin = archive
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return errors.New(err.Error() + ": " + string(output))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func CmdStream(cmd *exec.Cmd) (io.Reader, error) {
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stderr, err := cmd.StderrPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pipeR, pipeW := io.Pipe()
|
||||
go func() {
|
||||
_, err := io.Copy(pipeW, stdout)
|
||||
if err != nil {
|
||||
pipeW.CloseWithError(err)
|
||||
}
|
||||
errText, e := ioutil.ReadAll(stderr)
|
||||
if e != nil {
|
||||
errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
|
||||
}
|
||||
if err := cmd.Wait(); err != nil {
|
||||
// FIXME: can this block if stderr outputs more than the size of StderrPipe()'s buffer?
|
||||
pipeW.CloseWithError(errors.New(err.Error() + ": " + string(errText)))
|
||||
} else {
|
||||
pipeW.Close()
|
||||
}
|
||||
}()
|
||||
if err := cmd.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pipeR, nil
|
||||
}
|
||||
@@ -1,54 +0,0 @@
|
||||
package image
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCmdStreamBad(t *testing.T) {
|
||||
badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1")
|
||||
out, err := CmdStream(badCmd)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to start command: " + err.Error())
|
||||
}
|
||||
if output, err := ioutil.ReadAll(out); err == nil {
|
||||
t.Fatalf("Command should have failed")
|
||||
} else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" {
|
||||
t.Fatalf("Wrong error value (%s)", err.Error())
|
||||
} else if s := string(output); s != "hello\n" {
|
||||
t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCmdStreamGood(t *testing.T) {
|
||||
cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0")
|
||||
out, err := CmdStream(cmd)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if output, err := ioutil.ReadAll(out); err != nil {
|
||||
t.Fatalf("Command should not have failed (err=%s)", err)
|
||||
} else if s := string(output); s != "hello\n" {
|
||||
t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTarUntar(t *testing.T) {
|
||||
archive, err := Tar(".", Uncompressed)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tmp, err := ioutil.TempDir("", "docker-test-untar")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmp)
|
||||
if err := Untar(archive, tmp); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := os.Stat(tmp); err != nil {
|
||||
t.Fatalf("Error stating %s: %s", tmp, err.Error())
|
||||
}
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
package image
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/dotcloud/docker/fake"
|
||||
"github.com/dotcloud/docker/future"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAddLayer(t *testing.T) {
|
||||
tmp, err := ioutil.TempDir("", "docker-test-image")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmp)
|
||||
store, err := NewLayerStore(tmp)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
archive, err := fake.FakeTar()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
layer, err := store.AddLayer(archive)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := os.Stat(layer); err != nil {
|
||||
t.Fatalf("Error testing for existence of layer: %s\n", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestComputeId(t *testing.T) {
|
||||
id1, err := future.ComputeId(bytes.NewBufferString("hello world\n"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
id2, err := future.ComputeId(bytes.NewBufferString("foo bar\n"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if id1 == id2 {
|
||||
t.Fatalf("Identical checksums for difference content (%s == %s)", id1, id2)
|
||||
}
|
||||
}
|
||||
@@ -82,7 +82,7 @@ lxc.mount.entry = /etc/resolv.conf {{$ROOTFS}}/etc/resolv.conf none bind,ro 0 0
|
||||
|
||||
|
||||
# drop linux capabilities (apply mainly to the user root in the container)
|
||||
lxc.cap.drop = audit_control audit_write mac_admin mac_override mknod net_raw setfcap setpcap sys_admin sys_boot sys_module sys_nice sys_pacct sys_rawio sys_resource sys_time sys_tty_config
|
||||
lxc.cap.drop = audit_control audit_write mac_admin mac_override mknod setfcap setpcap sys_admin sys_boot sys_module sys_nice sys_pacct sys_rawio sys_resource sys_time sys_tty_config
|
||||
|
||||
# limits
|
||||
{{if .Config.Memory}}
|
||||
|
||||
115
mount_test.go
Normal file
@@ -0,0 +1,115 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/fake"
|
||||
"github.com/dotcloud/docker/fs"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Look for inconsistencies in a store.
|
||||
func healthCheck(store *fs.Store) error {
|
||||
parents := make(map[string]bool)
|
||||
paths, err := store.Paths()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, path := range paths {
|
||||
images, err := store.List(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
IDs := make(map[string]bool) // All IDs for this path
|
||||
for _, img := range images {
|
||||
// Check for duplicate IDs per path
|
||||
if _, exists := IDs[img.Id]; exists {
|
||||
return fmt.Errorf("Duplicate ID: %s", img.Id)
|
||||
} else {
|
||||
IDs[img.Id] = true
|
||||
}
|
||||
// Store parent for 2nd pass
|
||||
if parent := img.Parent; parent != "" {
|
||||
parents[parent] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
// Check non-existing parents
|
||||
for parent := range parents {
|
||||
if _, exists := parents[parent]; !exists {
|
||||
return fmt.Errorf("Reference to non-registered parent: %s", parent)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Note: This test is in the docker package because it needs to be run as root
|
||||
func TestMount(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("", "docker-fs-test-mount")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
store, err := fs.New(dir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
archive, err := fake.FakeTar()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
image, err := store.Create(archive, nil, "foo", "Testing")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create mount targets
|
||||
root, err := ioutil.TempDir("", "docker-fs-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(root)
|
||||
|
||||
rw, err := ioutil.TempDir("", "docker-fs-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(rw)
|
||||
|
||||
mountpoint, err := image.Mount(root, rw)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer mountpoint.Umount()
|
||||
// Mountpoint should be marked as mounted
|
||||
if !mountpoint.Mounted() {
|
||||
t.Fatal("Mountpoint not mounted")
|
||||
}
|
||||
// There should be one mountpoint registered
|
||||
if mps, err := image.Mountpoints(); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if len(mps) != 1 {
|
||||
t.Fatal("Wrong number of mountpoints registered (should be %d, not %d)", 1, len(mps))
|
||||
}
|
||||
// Unmounting should work
|
||||
if err := mountpoint.Umount(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// De-registering should work
|
||||
if err := mountpoint.Deregister(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if mps, err := image.Mountpoints(); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if len(mps) != 0 {
|
||||
t.Fatal("Wrong number of mountpoints registered (should be %d, not %d)", 0, len(mps))
|
||||
}
|
||||
// General health check
|
||||
if err := healthCheck(store); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -1,27 +1,69 @@
|
||||
class virtualbox {
|
||||
Package { ensure => "installed" }
|
||||
|
||||
# remove some files from the base vagrant image because they're old
|
||||
file { "/home/vagrant/docker-master":
|
||||
ensure => absent,
|
||||
recurse => true,
|
||||
force => true,
|
||||
purge => true,
|
||||
}
|
||||
file { "/usr/local/bin/dockerd":
|
||||
ensure => absent,
|
||||
}
|
||||
|
||||
# Set up VirtualBox guest utils
|
||||
package { "virtualbox-guest-utils": }
|
||||
exec { "vbox-add" :
|
||||
command => "/etc/init.d/vboxadd setup",
|
||||
require => [
|
||||
Package["virtualbox-guest-utils"],
|
||||
Package["linux-headers-3.5.0-25-generic"], ],
|
||||
}
|
||||
}
|
||||
|
||||
class ec2 {
|
||||
}
|
||||
|
||||
class docker {
|
||||
|
||||
# update this with latest docker binary distro
|
||||
$docker_url = "http://docker.io.s3.amazonaws.com/builds/$kernel/$hardwaremodel/docker-master.tgz"
|
||||
$docker_url = "http://get.docker.io/builds/$kernel/$hardwaremodel/docker-master.tgz"
|
||||
# update this with latest go binary distro
|
||||
$go_url = "http://go.googlecode.com/files/go1.0.3.linux-amd64.tar.gz"
|
||||
|
||||
Package { ensure => "installed" }
|
||||
|
||||
package { ["lxc", "debootstrap", "wget", "bsdtar", "git",
|
||||
"pkg-config", "libsqlite3-dev",
|
||||
"linux-image-3.5.0-25-generic",
|
||||
"linux-image-extra-3.5.0-25-generic",
|
||||
"virtualbox-guest-utils",
|
||||
"linux-headers-3.5.0-25-generic"]: }
|
||||
|
||||
notify { "docker_url = $docker_url": withpath => true }
|
||||
|
||||
exec { "debootstrap" :
|
||||
require => Package["debootstrap"],
|
||||
command => "/usr/sbin/debootstrap --arch=amd64 quantal /var/lib/docker/images/docker-ut",
|
||||
creates => "/var/lib/docker/images/docker-ut",
|
||||
timeout => 0
|
||||
$ec2_version = file("/etc/ec2_version", "/dev/null")
|
||||
if ($ec2_version) {
|
||||
include ec2
|
||||
} else {
|
||||
# virtualbox is the vagrant default, so it should be safe to assume
|
||||
include virtualbox
|
||||
}
|
||||
|
||||
user { "vagrant":
|
||||
ensure => present,
|
||||
comment => "Vagrant User",
|
||||
shell => "/bin/bash",
|
||||
home => "/home/vagrant",
|
||||
}
|
||||
|
||||
file { "/usr/local/bin":
|
||||
ensure => directory,
|
||||
owner => root,
|
||||
group => root,
|
||||
mode => 755,
|
||||
}
|
||||
|
||||
exec { "fetch-go":
|
||||
require => Package["wget"],
|
||||
command => "/usr/bin/wget -O - $go_url | /bin/tar xz -C /usr/local",
|
||||
@@ -29,9 +71,8 @@ class docker {
|
||||
}
|
||||
|
||||
exec { "fetch-docker" :
|
||||
command => "/usr/bin/wget -O - $docker_url | /bin/tar xz -C /tmp",
|
||||
require => Package["wget"],
|
||||
command => "/usr/bin/wget -O - $docker_url | /bin/tar xz -C /home/vagrant",
|
||||
creates => "/home/vagrant/docker-master"
|
||||
}
|
||||
|
||||
file { "/etc/init/dockerd.conf":
|
||||
@@ -39,24 +80,25 @@ class docker {
|
||||
owner => "root",
|
||||
group => "root",
|
||||
content => template("docker/dockerd.conf"),
|
||||
require => [Exec["fetch-docker"], Exec["debootstrap"]]
|
||||
require => Exec["fetch-docker"],
|
||||
}
|
||||
|
||||
file { "/home/vagrant":
|
||||
mode => 644,
|
||||
require => User["vagrant"],
|
||||
}
|
||||
|
||||
file { "/home/vagrant/.profile":
|
||||
mode => 644,
|
||||
owner => "vagrant",
|
||||
group => "ubuntu",
|
||||
content => template("docker/profile"),
|
||||
require => File["/home/vagrant"],
|
||||
}
|
||||
|
||||
exec { "copy-docker-bin" :
|
||||
require => Exec["fetch-docker"],
|
||||
command => "/bin/cp /home/vagrant/docker-master/docker /usr/local/bin",
|
||||
creates => "/usr/local/bin/docker"
|
||||
}
|
||||
|
||||
exec { "copy-dockerd-bin" :
|
||||
require => Exec["fetch-docker"],
|
||||
command => "/bin/cp /home/vagrant/docker-master/dockerd /usr/local/bin",
|
||||
creates => "/usr/local/bin/dockerd"
|
||||
}
|
||||
|
||||
exec { "vbox-add" :
|
||||
require => Package["linux-headers-3.5.0-25-generic"],
|
||||
command => "/etc/init.d/vboxadd setup",
|
||||
command => "/usr/bin/sudo /bin/cp -f /tmp/docker-master/docker /usr/local/bin/",
|
||||
require => [ Exec["fetch-docker"], File["/usr/local/bin"] ],
|
||||
}
|
||||
|
||||
service { "dockerd" :
|
||||
@@ -67,6 +109,4 @@ class docker {
|
||||
name => "dockerd",
|
||||
provider => "base"
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
@@ -8,5 +8,5 @@ respawn
|
||||
|
||||
script
|
||||
test -f /etc/default/locale && . /etc/default/locale || true
|
||||
LANG=$LANG LC_ALL=$LANG /usr/local/bin/dockerd
|
||||
LANG=$LANG LC_ALL=$LANG /usr/local/bin/docker -d
|
||||
end script
|
||||
|
||||
27
puppet/modules/docker/templates/profile
Normal file
@@ -0,0 +1,27 @@
|
||||
# ~/.profile: executed by the command interpreter for login shells.
|
||||
# This file is not read by bash(1), if ~/.bash_profile or ~/.bash_login
|
||||
# exists.
|
||||
# see /usr/share/doc/bash/examples/startup-files for examples.
|
||||
# the files are located in the bash-doc package.
|
||||
|
||||
# the default umask is set in /etc/profile; for setting the umask
|
||||
# for ssh logins, install and configure the libpam-umask package.
|
||||
#umask 022
|
||||
|
||||
# if running bash
|
||||
if [ -n "$BASH_VERSION" ]; then
|
||||
# include .bashrc if it exists
|
||||
if [ -f "$HOME/.bashrc" ]; then
|
||||
. "$HOME/.bashrc"
|
||||
fi
|
||||
fi
|
||||
|
||||
# set PATH so it includes user's private bin if it exists
|
||||
if [ -d "$HOME/bin" ] ; then
|
||||
PATH="$HOME/bin:$PATH"
|
||||
fi
|
||||
|
||||
# set ~/docker as the go path
|
||||
export GOPATH=~/docker
|
||||
# add go to the PATH
|
||||
export PATH=$PATH:/usr/local/go/bin
|
||||
@@ -25,7 +25,12 @@ type Service interface {
|
||||
type Cmd func(io.ReadCloser, io.Writer, ...string) error
|
||||
type CmdMethod func(Service, io.ReadCloser, io.Writer, ...string) error
|
||||
|
||||
// FIXME: For reverse compatibility
|
||||
func call(service Service, stdin io.ReadCloser, stdout io.Writer, args ...string) error {
|
||||
return LocalCall(service, stdin, stdout, args...)
|
||||
}
|
||||
|
||||
func LocalCall(service Service, stdin io.ReadCloser, stdout io.Writer, args ...string) error {
|
||||
if len(args) == 0 {
|
||||
args = []string{"help"}
|
||||
}
|
||||
|
||||
2
state.go
@@ -1,8 +1,8 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"./future"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/future"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package client
|
||||
package term
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
@@ -1,4 +1,4 @@
|
||||
package client
|
||||
package term
|
||||
|
||||
import "syscall"
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package client
|
||||
package term
|
||||
|
||||
import "syscall"
|
||||
|
||||