Mirror of https://github.com/moby/moby.git (synced 2026-01-12 03:01:38 +00:00)

Compare commits: 77 commits

Commits in this range (only abbreviated SHA1s are shown):

38b8373434, 03b5f8a585, bc260f0225, 45dcd1125b, d2e063d9e1, 567a484b66, 5d4b886ad6,
90668a8a99, c7fd84b8a0, 874a40ed3a, 370fafacbf, a0478f726d, e5bc5a2e31, 25fc3a7e76,
b3ab0b561e, 8b8c8bf7cb, a8651a23b2, f744cfd5a7, e03b241fb1, 1ddca1948b, 2485bb2cd2,
7577f48dc4, 0512cf9c83, 73da7a12e7, 50f5723f1d, cbc4eccd50, cff26b3a6c, 329c3e0ffd,
4f6cc5c733, e413340723, 95e066d24f, 82b8f7a565, 97badbd29e, 5a5e417d46, 4031a01af1,
0b0d958b88, 03e4704ae5, 7a8ac76299, c05c91ca3b, b76d63cb0c, f926ed182f, d440782e17,
82848d4158, 97535e5a64, f079fbe3fa, 90d144b612, d3db94696d, ffe16e3224, 0b60829df7,
690e118670, 038e1d174b, 6c8dcd5cbb, 16aeb77d51, 4ac3b803b9, 3514e47edc, acb546cd1b,
2ced94b414, 71b5806614, 1f65c6bf4c, 965e8a02d2, baacae8345, 52cedb8a05, 15c7e72e2a,
76b40ad6c9, 6909f3911f, 0731d1a582, 8ecde8f9a5, e49af5b6de, f3e89fae28, c42a4179fc,
2d32ac8cff, f68d107a13, 640efc2ed2, 003622c8b6, 6de5ca1e64, 7eda9c64b8, 84c13a3dcf
CHANGELOG.md (new file, 86 lines)
@@ -0,0 +1,86 @@
# Changelog

## 0.2.0 (2013-04-23)
- Runtime: ghost containers can be killed and waited for
- Documentation: update install instructions
- Packaging: fix Vagrantfile
- Development: automate releasing binaries and ubuntu packages
- Add a changelog
- Various bugfixes

## 0.1.8 (2013-04-22)
- Dynamically detect cgroup capabilities
- Issue stability warning on kernels <3.8
- 'docker push' buffers on disk instead of memory
- Fix 'docker diff' for removed files
- Fix 'docker stop' for ghost containers
- Fix handling of pidfile
- Various bugfixes and stability improvements

## 0.1.7 (2013-04-18)
- Container ports are available on localhost
- 'docker ps' shows allocated TCP ports
- Contributors can run 'make hack' to start a continuous integration VM
- Streamline ubuntu packaging & uploading
- Various bugfixes and stability improvements

## 0.1.6 (2013-04-17)
- Record the author of an image with 'docker commit -author'

## 0.1.5 (2013-04-17)
- Disable standalone mode
- Use a custom DNS resolver with 'docker -d -dns'
- Detect ghost containers
- Improve diagnosis of missing system capabilities
- Allow disabling memory limits at compile time
- Add debian packaging
- Documentation: installing on Arch Linux
- Documentation: running Redis on docker
- Fixed lxc 0.9 compatibility
- Automatically load aufs module
- Various bugfixes and stability improvements

## 0.1.4 (2013-04-09)
- Full support for TTY emulation
- Detach from a TTY session with the escape sequence `C-p C-q`
- Various bugfixes and stability improvements
- Minor UI improvements
- Automatically create our own bridge interface 'docker0'

## 0.1.3 (2013-04-04)
- Choose TCP frontend port with '-p :PORT'
- Layer format is versioned
- Major reliability improvements to the process manager
- Various bugfixes and stability improvements

## 0.1.2 (2013-04-03)
- Set container hostname with 'docker run -h'
- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]'
- Various bugfixes and stability improvements
- UI polish
- Progress bar on push/pull
- Use XZ compression by default
- Make IP allocator lazy

## 0.1.1 (2013-03-31)
- Display shorthand IDs for convenience
- Stabilize process management
- Layers can include a commit message
- Simplified 'docker attach'
- Fixed support for re-attaching
- Various bugfixes and stability improvements
- Auto-download at run
- Auto-login on push
- Beefed up documentation

## 0.1.0 (2013-03-23)
- First release
- Implement registry in order to push/pull images
- TCP port allocation
- Fix termcaps on Linux
- Add documentation
- Add Vagrant support with Vagrantfile
- Add unit tests
- Add repository/tags to ease image management
- Improve the layer implementation
Makefile (33 lines changed)
@@ -1,5 +1,9 @@
DOCKER_PACKAGE := github.com/dotcloud/docker
RELEASE_VERSION := $(shell git tag | grep -E "v[0-9\.]+$$" | sort -nr | head -n 1)
SRCRELEASE := docker-$(RELEASE_VERSION)
BINRELEASE := docker-$(RELEASE_VERSION).tgz

GIT_ROOT := $(shell git rev-parse --show-toplevel)
BUILD_DIR := $(CURDIR)/.gopath

GOPATH ?= $(BUILD_DIR)
@@ -13,10 +17,7 @@ endif
GIT_COMMIT = $(shell git rev-parse --short HEAD)
GIT_STATUS = $(shell test -n "`git status --porcelain`" && echo "+CHANGES")

NO_MEMORY_LIMIT ?= 0
export NO_MEMORY_LIMIT

BUILD_OPTIONS = -ldflags "-X main.GIT_COMMIT $(GIT_COMMIT)$(GIT_STATUS) -X main.NO_MEMORY_LIMIT $(NO_MEMORY_LIMIT)"
BUILD_OPTIONS = -ldflags "-X main.GIT_COMMIT $(GIT_COMMIT)$(GIT_STATUS)"

SRC_DIR := $(GOPATH)/src

@@ -26,18 +27,38 @@ DOCKER_MAIN := $(DOCKER_DIR)/docker
DOCKER_BIN_RELATIVE := bin/docker
DOCKER_BIN := $(CURDIR)/$(DOCKER_BIN_RELATIVE)

.PHONY: all clean test hack
.PHONY: all clean test hack release srcrelease $(BINRELEASE) $(SRCRELEASE) $(DOCKER_BIN) $(DOCKER_DIR)

all: $(DOCKER_BIN)

$(DOCKER_BIN): $(DOCKER_DIR)
	@mkdir -p $(dir $@)
	@(cd $(DOCKER_MAIN); go get $(GO_OPTIONS); go build $(GO_OPTIONS) $(BUILD_OPTIONS) -o $@)
	@(cd $(DOCKER_MAIN); go build $(GO_OPTIONS) $(BUILD_OPTIONS) -o $@)
	@echo $(DOCKER_BIN_RELATIVE) is created.

$(DOCKER_DIR):
	@mkdir -p $(dir $@)
	@rm -f $@
	@ln -sf $(CURDIR)/ $@
	@(cd $(DOCKER_MAIN); go get $(GO_OPTIONS))

whichrelease:
	echo $(RELEASE_VERSION)

release: $(BINRELEASE)
srcrelease: $(SRCRELEASE)
deps: $(DOCKER_DIR)

# A clean checkout of $RELEASE_VERSION, with vendored dependencies
$(SRCRELEASE):
	rm -fr $(SRCRELEASE)
	git clone $(GIT_ROOT) $(SRCRELEASE)
	cd $(SRCRELEASE); git checkout -q $(RELEASE_VERSION)

# A binary release ready to be uploaded to a mirror
$(BINRELEASE): $(SRCRELEASE)
	rm -f $(BINRELEASE)
	cd $(SRCRELEASE); make; cp -R bin docker-$(RELEASE_VERSION); tar -f ../$(BINRELEASE) -zv -c docker-$(RELEASE_VERSION)

clean:
	@rm -rf $(dir $(DOCKER_BIN))
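The `BUILD_OPTIONS` line above relies on Go's `-X` linker flag to stamp the binary with the current git commit. A minimal standalone sketch of the receiving side (not a file from this diff; the variable name matches the one used in the CLI's main package):

```go
// Hypothetical standalone example of the -X link-flag mechanism used by BUILD_OPTIONS.
// Built with the older, space-separated syntax shown in the Makefile:
//   go build -ldflags "-X main.GIT_COMMIT abc1234+CHANGES"
// Recent Go releases expect the '=' form instead: -X main.GIT_COMMIT=abc1234+CHANGES
package main

import "fmt"

// Left empty in source; the linker overwrites it at build time.
var GIT_COMMIT string

func main() {
	fmt.Println("Git Commit:", GIT_COMMIT)
}
```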
@@ -125,7 +125,7 @@ Running an irc bouncer

```bash
BOUNCER_ID=$(docker run -d -p 6667 -u irc shykes/znc $USER $PASSWORD)
echo "Configure your irc client to connect to port $(port $BOUNCER_ID 6667) of this machine"
echo "Configure your irc client to connect to port $(docker port $BOUNCER_ID 6667) of this machine"
```

Running Redis
@@ -133,7 +133,7 @@ Running Redis

```bash
REDIS_ID=$(docker run -d -p 6379 shykes/redis redis-server)
echo "Configure your redis client to connect to port $(port $REDIS_ID 6379) of this machine"
echo "Configure your redis client to connect to port $(docker port $REDIS_ID 6379) of this machine"
```

Share your own image!
Vagrantfile (vendored, 57 lines changed)
@@ -2,19 +2,12 @@
# vi: set ft=ruby :

def v10(config)
  config.vm.box = "quantal64_3.5.0-25"
  config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box"
  config.vm.box = 'precise64'
  config.vm.box_url = 'http://files.vagrantup.com/precise64.box'

  config.vm.share_folder "v-data", "/opt/go/src/github.com/dotcloud/docker", File.dirname(__FILE__)

  # Ensure puppet is installed on the instance
  config.vm.provision :shell, :inline => "apt-get -qq update; apt-get install -y puppet"

  config.vm.provision :puppet do |puppet|
    puppet.manifests_path = "puppet/manifests"
    puppet.manifest_file = "quantal64.pp"
    puppet.module_path = "puppet/modules"
  end
  # Install ubuntu packaging dependencies and create ubuntu packages
  config.vm.provision :shell, :inline => "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >>/etc/apt/sources.list"
  config.vm.provision :shell, :inline => 'export DEBIAN_FRONTEND=noninteractive; apt-get -qq update; apt-get install -qq -y --force-yes lxc-docker'
end

Vagrant::VERSION < "1.1.0" and Vagrant::Config.run do |config|
@@ -30,11 +23,11 @@ Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
    config.vm.box = "dummy"
    config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
    aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
    aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
    aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
    aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
    aws.ssh_private_key_path = ENV["AWS_SSH_PRIVKEY"]
    aws.region = "us-east-1"
    aws.ami = "ami-ae9806c7"
    aws.ami = "ami-d0f89fb9"
    aws.ssh_username = "ubuntu"
    aws.instance_type = "t1.micro"
  end
@@ -51,7 +44,39 @@ Vagrant::VERSION >= "1.1.0" and Vagrant.configure("2") do |config|
  end

  config.vm.provider :virtualbox do |vb|
    config.vm.box = "quantal64_3.5.0-25"
    config.vm.box_url = "http://get.docker.io/vbox/ubuntu/12.10/quantal64_3.5.0-25.box"
    config.vm.box = 'precise64'
    config.vm.box_url = 'http://files.vagrantup.com/precise64.box'
  end
end

Vagrant::VERSION >= "1.2.0" and Vagrant.configure("2") do |config|
  config.vm.provider :aws do |aws, override|
    config.vm.box = "dummy"
    config.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
    aws.access_key_id = ENV["AWS_ACCESS_KEY_ID"]
    aws.secret_access_key = ENV["AWS_SECRET_ACCESS_KEY"]
    aws.keypair_name = ENV["AWS_KEYPAIR_NAME"]
    override.ssh.private_key_path = ENV["AWS_SSH_PRIVKEY"]
    override.ssh.username = "ubuntu"
    aws.region = "us-east-1"
    aws.ami = "ami-d0f89fb9"
    aws.instance_type = "t1.micro"
  end

  config.vm.provider :rackspace do |rs|
    config.vm.box = "dummy"
    config.vm.box_url = "https://github.com/mitchellh/vagrant-rackspace/raw/master/dummy.box"
    config.ssh.private_key_path = ENV["RS_PRIVATE_KEY"]
    rs.username = ENV["RS_USERNAME"]
    rs.api_key = ENV["RS_API_KEY"]
    rs.public_key_path = ENV["RS_PUBLIC_KEY"]
    rs.flavor = /512MB/
    rs.image = /Ubuntu/
  end

  config.vm.provider :virtualbox do |vb|
    config.vm.box = 'precise64'
    config.vm.box_url = 'http://files.vagrantup.com/precise64.box'
  end

end
archive.go (36 lines changed)
@@ -4,6 +4,7 @@ import (
	"errors"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
)

@@ -86,3 +87,38 @@ func CmdStream(cmd *exec.Cmd) (io.Reader, error) {
	}
	return pipeR, nil
}

// NewTempArchive reads the content of src into a temporary file, and returns the contents
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
	f, err := ioutil.TempFile(dir, "")
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(f, src); err != nil {
		return nil, err
	}
	if _, err := f.Seek(0, 0); err != nil {
		return nil, err
	}
	st, err := f.Stat()
	if err != nil {
		return nil, err
	}
	size := st.Size()
	return &TempArchive{f, size}, nil
}

type TempArchive struct {
	*os.File
	Size int64 // Pre-computed from Stat().Size() as a convenience
}

func (archive *TempArchive) Read(data []byte) (int, error) {
	n, err := archive.File.Read(data)
	if err != nil {
		os.Remove(archive.File.Name())
	}
	return n, err
}
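A short usage sketch for `NewTempArchive` (not part of the diff; it assumes `Archive` is an `io.Reader` alias, as the `io.Copy` call above implies, and that `Debugf` is the logging helper used elsewhere in the package): a caller can buffer a layer on disk and then stream it out, letting `TempArchive.Read` remove the backing file once the copy finishes.

```go
// Hypothetical usage of NewTempArchive: buffer a layer on disk, then stream it out.
// The temporary file deletes itself because TempArchive.Read calls os.Remove as
// soon as a read returns an error (io.EOF included).
package docker

import "io"

func bufferThenSend(layer Archive, dst io.Writer) error {
	tmp, err := NewTempArchive(layer, "") // empty dir: let ioutil.TempFile pick the default
	if err != nil {
		return err
	}
	Debugf("Buffered %d bytes on disk", tmp.Size)
	_, err = io.Copy(dst, tmp) // drives TempArchive.Read until EOF
	return err
}
```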
builder.go (new file, 169 lines)
@@ -0,0 +1,169 @@
package docker

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

type Builder struct {
	runtime *Runtime
}

func NewBuilder(runtime *Runtime) *Builder {
	return &Builder{
		runtime: runtime,
	}
}

func (builder *Builder) Run(image *Image, cmd ...string) (*Container, error) {
	// FIXME: pass a NopWriter instead of nil
	config, err := ParseRun(append([]string{"-d", image.Id}, cmd...), nil, builder.runtime.capabilities)
	if config.Image == "" {
		return nil, fmt.Errorf("Image not specified")
	}
	if len(config.Cmd) == 0 {
		return nil, fmt.Errorf("Command not specified")
	}
	if config.Tty {
		return nil, fmt.Errorf("The tty mode is not supported within the builder")
	}

	// Create new container
	container, err := builder.runtime.Create(config)
	if err != nil {
		return nil, err
	}
	if err := container.Start(); err != nil {
		return nil, err
	}
	return container, nil
}

func (builder *Builder) Commit(container *Container, repository, tag, comment, author string) (*Image, error) {
	return builder.runtime.Commit(container.Id, repository, tag, comment, author)
}

func (builder *Builder) clearTmp(containers, images map[string]struct{}) {
	for c := range containers {
		tmp := builder.runtime.Get(c)
		builder.runtime.Destroy(tmp)
		Debugf("Removing container %s", c)
	}
	for i := range images {
		builder.runtime.graph.Delete(i)
		Debugf("Removing image %s", i)
	}
}

func (builder *Builder) Build(dockerfile io.Reader, stdout io.Writer) error {
	var (
		image, base   *Image
		tmpContainers map[string]struct{} = make(map[string]struct{})
		tmpImages     map[string]struct{} = make(map[string]struct{})
	)
	defer builder.clearTmp(tmpContainers, tmpImages)

	file := bufio.NewReader(dockerfile)
	for {
		line, err := file.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		line = strings.TrimSpace(line)
		// Skip comments and empty line
		if len(line) == 0 || line[0] == '#' {
			continue
		}
		tmp := strings.SplitN(line, " ", 2)
		if len(tmp) != 2 {
			return fmt.Errorf("Invalid Dockerfile format")
		}
		switch tmp[0] {
		case "from":
			fmt.Fprintf(stdout, "FROM %s\n", tmp[1])
			image, err = builder.runtime.repositories.LookupImage(tmp[1])
			if err != nil {
				return err
			}
			break
		case "run":
			fmt.Fprintf(stdout, "RUN %s\n", tmp[1])
			if image == nil {
				return fmt.Errorf("Please provide a source image with `from` prior to run")
			}

			// Create the container and start it
			c, err := builder.Run(image, "/bin/sh", "-c", tmp[1])
			if err != nil {
				return err
			}
			tmpContainers[c.Id] = struct{}{}

			// Wait for it to finish
			if result := c.Wait(); result != 0 {
				return fmt.Errorf("!!! '%s' returned non-zero exit code '%d'. Aborting.", tmp[1], result)
			}

			// Commit the container
			base, err = builder.Commit(c, "", "", "", "")
			if err != nil {
				return err
			}
			tmpImages[base.Id] = struct{}{}

			fmt.Fprintf(stdout, "===> %s\n", base.ShortId())
			break
		case "copy":
			if image == nil {
				return fmt.Errorf("Please provide a source image with `from` prior to copy")
			}
			tmp2 := strings.SplitN(tmp[1], " ", 2)
			if len(tmp2) != 2 {
				return fmt.Errorf("Invalid COPY format")
			}
			fmt.Fprintf(stdout, "COPY %s to %s in %s\n", tmp2[0], tmp2[1], base.ShortId())

			file, err := Download(tmp2[0], stdout)
			if err != nil {
				return err
			}
			defer file.Body.Close()

			c, err := builder.Run(base, "echo", "insert", tmp2[0], tmp2[1])
			if err != nil {
				return err
			}

			if err := c.Inject(file.Body, tmp2[1]); err != nil {
				return err
			}

			base, err = builder.Commit(c, "", "", "", "")
			if err != nil {
				return err
			}
			fmt.Fprintf(stdout, "===> %s\n", base.ShortId())
			break
		default:
			fmt.Fprintf(stdout, "Skipping unknown op %s\n", tmp[0])
		}
	}
	if base != nil {
		// The build is successful, keep the temporary containers and images
		for i := range tmpImages {
			delete(tmpImages, i)
		}
		for i := range tmpContainers {
			delete(tmpContainers, i)
		}
		fmt.Fprintf(stdout, "Build finished. image id: %s\n", base.ShortId())
	} else {
		fmt.Fprintf(stdout, "An error occurred during the build\n")
	}
	return nil
}
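The `Build` loop above treats a Dockerfile as one instruction per line: the first word is the op (`from`, `run`, `copy`, lowercase at this stage), the remainder is its argument, and blank or `#`-prefixed lines are skipped. A standalone sketch of that parsing convention (illustrative only, not code from the commit):

```go
// Illustrative parser for the line format Build expects; not part of the commit.
package docker

import (
	"bufio"
	"fmt"
	"strings"
)

type instruction struct {
	Op  string // "from", "run", "copy", ...
	Arg string // everything after the first space
}

func parseDockerfile(input string) ([]instruction, error) {
	var out []instruction
	scanner := bufio.NewScanner(strings.NewReader(input))
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if len(line) == 0 || line[0] == '#' {
			continue // skip comments and empty lines
		}
		parts := strings.SplitN(line, " ", 2)
		if len(parts) != 2 {
			return nil, fmt.Errorf("Invalid Dockerfile format")
		}
		out = append(out, instruction{Op: parts[0], Arg: parts[1]})
	}
	return out, scanner.Err()
}
```

Note that in `Build` an unknown op is only warned about and skipped, so a misspelled instruction silently does nothing.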
103
commands.go
103
commands.go
@@ -10,6 +10,7 @@ import (
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -18,11 +19,10 @@ import (
|
||||
"unicode"
|
||||
)
|
||||
|
||||
const VERSION = "0.1.7"
|
||||
const VERSION = "0.2.0"
|
||||
|
||||
var (
|
||||
GIT_COMMIT string
|
||||
NO_MEMORY_LIMIT bool
|
||||
GIT_COMMIT string
|
||||
)
|
||||
|
||||
func (srv *Server) Name() string {
|
||||
@@ -34,6 +34,7 @@ func (srv *Server) Help() string {
|
||||
help := "Usage: docker COMMAND [arg...]\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n"
|
||||
for _, cmd := range [][]string{
|
||||
{"attach", "Attach to a running container"},
|
||||
{"build", "Build a container from Dockerfile"},
|
||||
{"commit", "Create a new image from a container's changes"},
|
||||
{"diff", "Inspect changes on a container's filesystem"},
|
||||
{"export", "Stream the contents of a container as a tar archive"},
|
||||
@@ -41,6 +42,7 @@ func (srv *Server) Help() string {
|
||||
{"images", "List images"},
|
||||
{"import", "Create a new filesystem image from the contents of a tarball"},
|
||||
{"info", "Display system-wide information"},
|
||||
{"insert", "Insert a file in an image"},
|
||||
{"inspect", "Return low-level information on a container"},
|
||||
{"kill", "Kill a running container"},
|
||||
{"login", "Register or Login to the docker registry server"},
|
||||
@@ -64,6 +66,74 @@ func (srv *Server) Help() string {
|
||||
return help
|
||||
}
|
||||
|
||||
func (srv *Server) CmdInsert(stdin io.ReadCloser, stdout rcli.DockerConn, args ...string) error {
|
||||
stdout.Flush()
|
||||
cmd := rcli.Subcmd(stdout, "insert", "IMAGE URL PATH", "Insert a file from URL in the IMAGE at PATH")
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
return nil
|
||||
}
|
||||
if cmd.NArg() != 3 {
|
||||
cmd.Usage()
|
||||
return nil
|
||||
}
|
||||
imageId := cmd.Arg(0)
|
||||
url := cmd.Arg(1)
|
||||
path := cmd.Arg(2)
|
||||
|
||||
img, err := srv.runtime.repositories.LookupImage(imageId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
file, err := Download(url, stdout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Body.Close()
|
||||
|
||||
b := NewBuilder(srv.runtime)
|
||||
c, err := b.Run(img, "echo", "insert", url, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.Inject(ProgressReader(file.Body, int(file.ContentLength), stdout, "Downloading %v/%v (%v)"), path); err != nil {
|
||||
return err
|
||||
}
|
||||
// FIXME: Handle custom repo, tag comment, author
|
||||
img, err = b.Commit(c, "", "", img.Comment, img.Author)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintf(stdout, "%s\n", img.Id)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (srv *Server) CmdBuild(stdin io.ReadCloser, stdout rcli.DockerConn, args ...string) error {
|
||||
stdout.Flush()
|
||||
cmd := rcli.Subcmd(stdout, "build", "[Dockerfile|-]", "Build a container from Dockerfile")
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
return nil
|
||||
}
|
||||
dockerfile := cmd.Arg(0)
|
||||
if dockerfile == "" {
|
||||
dockerfile = "Dockerfile"
|
||||
}
|
||||
|
||||
var file io.Reader
|
||||
|
||||
if dockerfile != "-" {
|
||||
f, err := os.Open(dockerfile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
file = f
|
||||
} else {
|
||||
file = stdin
|
||||
}
|
||||
return NewBuilder(srv.runtime).Build(file, stdout)
|
||||
}
|
||||
|
||||
// 'docker login': login / register a user to registry service.
|
||||
func (srv *Server) CmdLogin(stdin io.ReadCloser, stdout rcli.DockerConn, args ...string) error {
|
||||
// Read a line on raw terminal with support for simple backspace
|
||||
@@ -184,10 +254,14 @@ func (srv *Server) CmdWait(stdin io.ReadCloser, stdout io.Writer, args ...string
|
||||
|
||||
// 'docker version': show version information
|
||||
func (srv *Server) CmdVersion(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
|
||||
fmt.Fprintf(stdout, "Version:%s\n", VERSION)
|
||||
fmt.Fprintf(stdout, "Git Commit:%s\n", GIT_COMMIT)
|
||||
if NO_MEMORY_LIMIT {
|
||||
fmt.Fprintf(stdout, "Memory limit disabled\n")
|
||||
fmt.Fprintf(stdout, "Version: %s\n", VERSION)
|
||||
fmt.Fprintf(stdout, "Git Commit: %s\n", GIT_COMMIT)
|
||||
fmt.Fprintf(stdout, "Kernel: %s\n", srv.runtime.kernelVersion)
|
||||
if !srv.runtime.capabilities.MemoryLimit {
|
||||
fmt.Fprintf(stdout, "WARNING: No memory limit support\n")
|
||||
}
|
||||
if !srv.runtime.capabilities.SwapLimit {
|
||||
fmt.Fprintf(stdout, "WARNING: No swap limit support\n")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -472,7 +546,7 @@ func (srv *Server) CmdImport(stdin io.ReadCloser, stdout rcli.DockerConn, args .
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
archive = ProgressReader(resp.Body, int(resp.ContentLength), stdout)
|
||||
archive = ProgressReader(resp.Body, int(resp.ContentLength), stdout, "Importing %v/%v (%v)")
|
||||
}
|
||||
img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "")
|
||||
if err != nil {
|
||||
@@ -833,6 +907,10 @@ func (srv *Server) CmdAttach(stdin io.ReadCloser, stdout rcli.DockerConn, args .
|
||||
return fmt.Errorf("No such container: %s", name)
|
||||
}
|
||||
|
||||
if container.State.Ghost {
|
||||
return fmt.Errorf("Impossible to attach to a ghost container")
|
||||
}
|
||||
|
||||
if container.Config.Tty {
|
||||
stdout.SetOptionRawTerminal()
|
||||
}
|
||||
@@ -910,7 +988,7 @@ func (srv *Server) CmdTag(stdin io.ReadCloser, stdout io.Writer, args ...string)
|
||||
}
|
||||
|
||||
func (srv *Server) CmdRun(stdin io.ReadCloser, stdout rcli.DockerConn, args ...string) error {
|
||||
config, err := ParseRun(args, stdout)
|
||||
config, err := ParseRun(args, stdout, srv.runtime.capabilities)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -976,8 +1054,13 @@ func (srv *Server) CmdRun(stdin io.ReadCloser, stdout rcli.DockerConn, args ...s
|
||||
}
|
||||
Debugf("Waiting for attach to return\n")
|
||||
<-attachErr
|
||||
container.Wait()
|
||||
// Expecting I/O pipe error, discarding
|
||||
|
||||
// If we are in stdinonce mode, wait for the process to end
|
||||
// otherwise, simply return
|
||||
if config.StdinOnce && !config.Tty {
|
||||
container.Wait()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -228,6 +228,21 @@ func TestRunDisconnectTty(t *testing.T) {
|
||||
close(c1)
|
||||
}()
|
||||
|
||||
setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
|
||||
for {
|
||||
// Client disconnect after run -i should keep stdin out in TTY mode
|
||||
l := runtime.List()
|
||||
if len(l) == 1 && l[0].State.Running {
|
||||
break
|
||||
}
|
||||
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
})
|
||||
|
||||
// Client disconnect after run -i should keep stdin out in TTY mode
|
||||
container := runtime.List()[0]
|
||||
|
||||
setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
|
||||
if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 15); err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -242,8 +257,6 @@ func TestRunDisconnectTty(t *testing.T) {
|
||||
// In tty mode, we expect the process to stay alive even after client's stdin closes.
|
||||
// Do not wait for run to finish
|
||||
|
||||
// Client disconnect after run -i should keep stdin out in TTY mode
|
||||
container := runtime.List()[0]
|
||||
// Give some time to monitor to do his thing
|
||||
container.WaitTimeout(500 * time.Millisecond)
|
||||
if !container.State.Running {
|
||||
|
||||
83
container.go
83
container.go
@@ -68,7 +68,7 @@ type Config struct {
|
||||
Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
|
||||
}
|
||||
|
||||
func ParseRun(args []string, stdout io.Writer) (*Config, error) {
|
||||
func ParseRun(args []string, stdout io.Writer, capabilities *Capabilities) (*Config, error) {
|
||||
cmd := rcli.Subcmd(stdout, "run", "[OPTIONS] IMAGE COMMAND [ARG...]", "Run a command in a new container")
|
||||
if len(args) > 0 && args[0] != "--help" {
|
||||
cmd.SetOutput(ioutil.Discard)
|
||||
@@ -83,8 +83,8 @@ func ParseRun(args []string, stdout io.Writer) (*Config, error) {
|
||||
flTty := cmd.Bool("t", false, "Allocate a pseudo-tty")
|
||||
flMemory := cmd.Int64("m", 0, "Memory limit (in bytes)")
|
||||
|
||||
if *flMemory > 0 && NO_MEMORY_LIMIT {
|
||||
fmt.Fprintf(stdout, "WARNING: This version of docker has been compiled without memory limit support. Discarding -m.")
|
||||
if *flMemory > 0 && !capabilities.MemoryLimit {
|
||||
fmt.Fprintf(stdout, "WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
|
||||
*flMemory = 0
|
||||
}
|
||||
|
||||
@@ -137,6 +137,12 @@ func ParseRun(args []string, stdout io.Writer) (*Config, error) {
|
||||
Dns: flDns,
|
||||
Image: image,
|
||||
}
|
||||
|
||||
if *flMemory > 0 && !capabilities.SwapLimit {
|
||||
fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
|
||||
config.MemorySwap = -1
|
||||
}
|
||||
|
||||
// When allocating stdin in attached mode, close stdin at client disconnect
|
||||
if config.OpenStdin && config.AttachStdin {
|
||||
config.StdinOnce = true
|
||||
@@ -162,6 +168,23 @@ func (settings *NetworkSettings) PortMappingHuman() string {
|
||||
return strings.Join(mapping, ", ")
|
||||
}
|
||||
|
||||
// Inject the io.Reader at the given path. Note: do not close the reader
|
||||
func (container *Container) Inject(file io.Reader, pth string) error {
|
||||
// Make sure the directory exists
|
||||
if err := os.MkdirAll(path.Join(container.rwPath(), path.Dir(pth)), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
// FIXME: Handle permissions/already existing dest
|
||||
dest, err := os.Create(path.Join(container.rwPath(), pth))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.Copy(dest, file); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (container *Container) Cmd() *exec.Cmd {
|
||||
return container.cmd
|
||||
}
|
||||
@@ -379,10 +402,15 @@ func (container *Container) Start() error {
|
||||
return err
|
||||
}
|
||||
|
||||
if container.Config.Memory > 0 && NO_MEMORY_LIMIT {
|
||||
log.Printf("WARNING: This version of docker has been compiled without memory limit support. Discarding the limit.")
|
||||
// Make sure the config is compatible with the current kernel
|
||||
if container.Config.Memory > 0 && !container.runtime.capabilities.MemoryLimit {
|
||||
log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
|
||||
container.Config.Memory = 0
|
||||
}
|
||||
if container.Config.Memory > 0 && !container.runtime.capabilities.SwapLimit {
|
||||
log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
|
||||
container.Config.MemorySwap = -1
|
||||
}
|
||||
|
||||
if err := container.generateLXCConfig(); err != nil {
|
||||
return err
|
||||
@@ -519,16 +547,42 @@ func (container *Container) releaseNetwork() {
|
||||
container.NetworkSettings = &NetworkSettings{}
|
||||
}
|
||||
|
||||
// FIXME: replace this with a control socket within docker-init
|
||||
func (container *Container) waitLxc() error {
|
||||
for {
|
||||
if output, err := exec.Command("lxc-info", "-n", container.Id).CombinedOutput(); err != nil {
|
||||
return err
|
||||
} else {
|
||||
if !strings.Contains(string(output), "RUNNING") {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (container *Container) monitor() {
|
||||
// Wait for the program to exit
|
||||
Debugf("Waiting for process")
|
||||
if err := container.cmd.Wait(); err != nil {
|
||||
// Discard the error as any signals or non 0 returns will generate an error
|
||||
Debugf("%s: Process: %s", container.Id, err)
|
||||
|
||||
// If the command does not exists, try to wait via lxc
|
||||
if container.cmd == nil {
|
||||
if err := container.waitLxc(); err != nil {
|
||||
Debugf("%s: Process: %s", container.Id, err)
|
||||
}
|
||||
} else {
|
||||
if err := container.cmd.Wait(); err != nil {
|
||||
// Discard the error as any signals or non 0 returns will generate an error
|
||||
Debugf("%s: Process: %s", container.Id, err)
|
||||
}
|
||||
}
|
||||
Debugf("Process finished")
|
||||
|
||||
exitCode := container.cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
|
||||
var exitCode int = -1
|
||||
if container.cmd != nil {
|
||||
exitCode = container.cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
container.releaseNetwork()
|
||||
@@ -577,7 +631,7 @@ func (container *Container) monitor() {
|
||||
}
|
||||
|
||||
func (container *Container) kill() error {
|
||||
if !container.State.Running || container.cmd == nil {
|
||||
if !container.State.Running {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -589,6 +643,9 @@ func (container *Container) kill() error {
|
||||
|
||||
// 2. Wait for the process to die, in last resort, try to kill the process directly
|
||||
if err := container.WaitTimeout(10 * time.Second); err != nil {
|
||||
if container.cmd == nil {
|
||||
return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", container.Id)
|
||||
}
|
||||
log.Printf("Container %s failed to exit within 10 seconds of lxc SIGKILL - trying direct SIGKILL", container.Id)
|
||||
if err := container.cmd.Process.Kill(); err != nil {
|
||||
return err
|
||||
@@ -606,9 +663,6 @@ func (container *Container) Kill() error {
|
||||
if !container.State.Running {
|
||||
return nil
|
||||
}
|
||||
if container.State.Ghost {
|
||||
return fmt.Errorf("Can't kill ghost container")
|
||||
}
|
||||
return container.kill()
|
||||
}
|
||||
|
||||
@@ -618,9 +672,6 @@ func (container *Container) Stop(seconds int) error {
|
||||
if !container.State.Running {
|
||||
return nil
|
||||
}
|
||||
if container.State.Ghost {
|
||||
return fmt.Errorf("Can't stop ghot container")
|
||||
}
|
||||
|
||||
// 1. Send a SIGTERM
|
||||
if output, err := exec.Command("lxc-kill", "-n", container.Id, "15").CombinedOutput(); err != nil {
|
||||
|
||||
@@ -150,6 +150,82 @@ func TestMultipleAttachRestart(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestDiff(t *testing.T) {
|
||||
runtime, err := newTestRuntime()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer nuke(runtime)
|
||||
|
||||
// Create a container and remove a file
|
||||
container1, err := runtime.Create(
|
||||
&Config{
|
||||
Image: GetTestImage(runtime).Id,
|
||||
Cmd: []string{"/bin/rm", "/etc/passwd"},
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer runtime.Destroy(container1)
|
||||
|
||||
if err := container1.Run(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check the changelog
|
||||
c, err := container1.Changes()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
success := false
|
||||
for _, elem := range c {
|
||||
if elem.Path == "/etc/passwd" && elem.Kind == 2 {
|
||||
success = true
|
||||
}
|
||||
}
|
||||
if !success {
|
||||
t.Fatalf("/etc/passwd has been removed but is not present in the diff")
|
||||
}
|
||||
|
||||
// Commit the container
|
||||
rwTar, err := container1.ExportRw()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
img, err := runtime.graph.Create(rwTar, container1, "unit test commited image - diff", "")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// Create a new container from the commited image
|
||||
container2, err := runtime.Create(
|
||||
&Config{
|
||||
Image: img.Id,
|
||||
Cmd: []string{"cat", "/etc/passwd"},
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer runtime.Destroy(container2)
|
||||
|
||||
if err := container2.Run(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check the changelog
|
||||
c, err = container2.Changes()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, elem := range c {
|
||||
if elem.Path == "/etc/passwd" {
|
||||
t.Fatalf("/etc/passwd should not be present in the diff after commit.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitRun(t *testing.T) {
|
||||
runtime, err := newTestRuntime()
|
||||
if err != nil {
|
||||
|
||||
@@ -7,15 +7,16 @@ import (
|
||||
"github.com/dotcloud/docker/rcli"
|
||||
"github.com/dotcloud/docker/term"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strconv"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var (
|
||||
GIT_COMMIT string
|
||||
NO_MEMORY_LIMIT string
|
||||
GIT_COMMIT string
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -39,15 +40,11 @@ func main() {
|
||||
os.Setenv("DEBUG", "1")
|
||||
}
|
||||
docker.GIT_COMMIT = GIT_COMMIT
|
||||
docker.NO_MEMORY_LIMIT = NO_MEMORY_LIMIT == "1"
|
||||
if *flDaemon {
|
||||
if flag.NArg() != 0 {
|
||||
flag.Usage()
|
||||
return
|
||||
}
|
||||
if NO_MEMORY_LIMIT == "1" {
|
||||
log.Printf("WARNING: This version of docker has been compiled without memory limit support.")
|
||||
}
|
||||
if err := daemon(*pidfile); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
@@ -59,8 +56,13 @@ func main() {
|
||||
}
|
||||
|
||||
func createPidFile(pidfile string) error {
|
||||
if _, err := os.Stat(pidfile); err == nil {
|
||||
return fmt.Errorf("pid file found, ensure docker is not running or delete %s", pidfile)
|
||||
if pidString, err := ioutil.ReadFile(pidfile); err == nil {
|
||||
pid, err := strconv.Atoi(string(pidString))
|
||||
if err == nil {
|
||||
if _, err := os.Stat(fmt.Sprintf("/proc/%d/", pid)); err == nil {
|
||||
return fmt.Errorf("pid file found, ensure docker is not running or delete %s", pidfile)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
file, err := os.Create(pidfile)
|
||||
|
||||
@@ -51,6 +51,7 @@ docs:
	cp sources/dotcloud.yml $(BUILDDIR)/html/
	cp sources/CNAME $(BUILDDIR)/html/
	cp sources/.nojekyll $(BUILDDIR)/html/
	cp sources/nginx.conf $(BUILDDIR)/html/
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
||||
|
||||
@@ -10,7 +10,7 @@ Building blocks

Images
------
An original container image. These are stored on disk and are comparable with what you normally expect from a stoppped virtual machine image. Images are stored (and retrieved from) repository
An original container image. These are stored on disk and are comparable with what you normally expect from a stopped virtual machine image. Images are stored (and retrieved from) repository

Images are stored on your local file system under /var/lib/docker/images
@@ -49,7 +49,7 @@ Save the changed we just made in the container to a new image called "_/builds/g
WEB_WORKER=$(docker run -d -p 5000 $BUILD_IMG /usr/local/bin/runapp)

- **"docker run -d "** run a command in a new container. We pass "-d" so it runs as a daemon.
**"-p 5000"* the web app is going to listen on this port, so it must be mapped from the container to the host system.
- **"-p 5000"** the web app is going to listen on this port, so it must be mapped from the container to the host system.
- **"$BUILD_IMG"** is the image we want to run the command inside of.
- **/usr/local/bin/runapp** is the command which starts the web app.
@@ -71,34 +71,40 @@
|
||||
<h2>
|
||||
<a name="installing-on-ubuntu-1204-and-1210" class="anchor" href="#installing-on-ubuntu-1204-and-1210"><span class="mini-icon mini-icon-link"></span>
|
||||
</a>Installing on Ubuntu</h2>
|
||||
|
||||
<p><strong>Requirements</strong></p>
|
||||
<ul>
|
||||
<li>Ubuntu 12.04 (LTS) (64-bit)</li>
|
||||
<li> or Ubuntu 12.10 (quantal) (64-bit)</li>
|
||||
</ul>
|
||||
<ol>
|
||||
<li>
|
||||
<p>Install dependencies:</p>
|
||||
<p><strong>Install dependencies</strong></p>
|
||||
The linux-image-extra package is only needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
|
||||
<pre>sudo apt-get install linux-image-extra-`uname -r`</pre>
|
||||
|
||||
<div class="highlight">
|
||||
<pre>sudo apt-get install lxc wget bsdtar curl</pre>
|
||||
<pre>sudo apt-get install linux-image-extra-<span class="sb">`</span>uname -r<span class="sb">`</span></pre></div>
|
||||
|
||||
<p>The <code>linux-image-extra</code> package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.</p>
|
||||
</li>
|
||||
<li>
|
||||
<p>Install the latest docker binary:</p>
|
||||
<p><strong>Install Docker</strong></p>
|
||||
<p>Add the Ubuntu PPA (Personal Package Archive) sources to your apt sources list, update and install.</p>
|
||||
<p>You may see some warnings that the GPG keys cannot be verified.</p>
|
||||
<div class="highlight">
|
||||
<pre>sudo sh -c "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >> /etc/apt/sources.list"</pre>
|
||||
<pre>sudo apt-get update</pre>
|
||||
<pre>sudo apt-get install lxc-docker</pre>
|
||||
</div>
|
||||
|
||||
|
||||
</li>
|
||||
|
||||
<li>
|
||||
<p><strong>Run!</strong></p>
|
||||
|
||||
<div class="highlight">
|
||||
<pre>wget http://get.docker.io/builds/<span class="k">$(</span>uname -s<span class="k">)</span>/<span class="k">$(</span>uname -m<span class="k">)</span>/docker-master.tgz</pre>
|
||||
<pre>tar -xf docker-master.tgz</pre>
|
||||
<pre>docker run -i -t ubuntu /bin/bash</pre>
|
||||
</div>
|
||||
</li>
|
||||
<li>
|
||||
<p>Run your first container!</p>
|
||||
|
||||
<div class="highlight"><pre><span class="nb">cd </span>docker-master</pre>
|
||||
<pre>sudo ./docker run -i -t base /bin/bash</pre>
|
||||
</div>
|
||||
<p>Done!</p>
|
||||
<p>Consider adding docker to your <code>PATH</code> for simplicity.</p>
|
||||
</li>
|
||||
|
||||
Continue with the <a href="http://docs.docker.io/en/latest/examples/hello_world/">Hello world</a> example.
|
||||
</ol>
|
||||
</section>
|
||||
@@ -117,7 +123,7 @@
|
||||
vagrant and an Ubuntu virtual machine.</strong></p>
|
||||
|
||||
<ul>
|
||||
<li><a href="http://docs.docker.io/en/latest/installation/macos/">Mac OS X and other linuxes</a></li>
|
||||
<li><a href="http://docs.docker.io/en/latest/installation/vagrant/">Mac OS X and other linuxes</a></li>
|
||||
<li><a href="http://docs.docker.io/en/latest/installation/windows/">Windows</a></li>
|
||||
</ul>
|
||||
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
Amazon EC2
|
||||
==========
|
||||
|
||||
Please note this is a community contributed installation path. The only 'official' installation is using the :ref:`ubuntu_linux` installation path. This version
|
||||
may be out of date because it depends on some binaries to be updated and published
|
||||
Please note this is a community contributed installation path. The only 'official' installation is using the
|
||||
:ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
|
||||
|
||||
|
||||
Installation
|
||||
------------
|
||||
@@ -17,7 +18,7 @@ Docker can now be installed on Amazon EC2 with a single vagrant command. Vagrant
|
||||
vagrant plugin install vagrant-aws
|
||||
|
||||
|
||||
3. Get the docker sources, this will give you the latest Vagrantfile and puppet manifests.
|
||||
3. Get the docker sources, this will give you the latest Vagrantfile.
|
||||
|
||||
::
|
||||
|
||||
|
||||
@@ -3,19 +3,23 @@
|
||||
Arch Linux
|
||||
==========
|
||||
|
||||
Please note this is a community contributed installation path. The only 'official' installation is using the
|
||||
:ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
|
||||
|
||||
|
||||
Installing on Arch Linux is not officially supported but can be handled via
|
||||
either of the following AUR packages:
|
||||
|
||||
* `dotcloud-docker <https://aur.archlinux.org/packages/dotcloud-docker/>`_
|
||||
* `dotcloud-docker-git <https://aur.archlinux.org/packages/dotcloud-docker-git/>`_
|
||||
* `lxc-docker <https://aur.archlinux.org/packages/lxc-docker/>`_
|
||||
* `lxc-docker-git <https://aur.archlinux.org/packages/lxc-docker-git/>`_
|
||||
|
||||
The dotcloud-docker package will install the latest tagged version of docker.
|
||||
The dotcloud-docker-git package will build from the current master branch.
|
||||
The lxc-docker package will install the latest tagged version of docker.
|
||||
The lxc-docker-git package will build from the current master branch.
|
||||
|
||||
Dependencies
|
||||
------------
|
||||
|
||||
Docker depends on several packages which will be installed automatically with
|
||||
Docker depends on several packages which are specified as dependencies in
|
||||
either AUR package.
|
||||
|
||||
* aufs3
|
||||
@@ -23,6 +27,7 @@ either AUR package.
|
||||
* go
|
||||
* iproute2
|
||||
* linux-aufs_friendly
|
||||
* lxc
|
||||
|
||||
Installation
|
||||
------------
|
||||
@@ -37,7 +42,24 @@ new kernel will be compiled and this can take quite a while.
|
||||
|
||||
::
|
||||
|
||||
yaourt -S dotcloud-docker-git
|
||||
yaourt -S lxc-docker-git
|
||||
|
||||
|
||||
Starting Docker
|
||||
---------------
|
||||
|
||||
Prior to starting docker modify your bootloader to use the
|
||||
**linux-aufs_friendly** kernel and reboot your system.
|
||||
|
||||
There is a systemd service unit created for docker. To start the docker service:
|
||||
|
||||
::
|
||||
|
||||
sudo systemctl start docker
|
||||
|
||||
|
||||
To start on system boot:
|
||||
|
||||
::
|
||||
|
||||
sudo systemctl enable docker
|
||||
|
||||
53
docs/sources/installation/binaries.rst
Normal file
53
docs/sources/installation/binaries.rst
Normal file
@@ -0,0 +1,53 @@
|
||||
.. _binaries:
|
||||
|
||||
Binaries
|
||||
========
|
||||
|
||||
**Please note this project is currently under heavy development. It should not be used in production.**
|
||||
|
||||
|
||||
Right now, the officially supported distributions are:
|
||||
|
||||
- Ubuntu 12.04 (precise LTS) (64-bit)
|
||||
- Ubuntu 12.10 (quantal) (64-bit)
|
||||
|
||||
|
||||
Install dependencies:
|
||||
---------------------
|
||||
|
||||
::
|
||||
|
||||
sudo apt-get install lxc bsdtar
|
||||
sudo apt-get install linux-image-extra-`uname -r`
|
||||
|
||||
The linux-image-extra package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
|
||||
|
||||
Install the docker binary:
|
||||
|
||||
::
|
||||
|
||||
wget http://get.docker.io/builds/Linux/x86_64/docker-master.tgz
|
||||
tar -xf docker-master.tgz
|
||||
sudo cp ./docker-master /usr/local/bin
|
||||
|
||||
Note: docker currently only supports 64-bit Linux hosts.
|
||||
|
||||
|
||||
Run the docker daemon
|
||||
---------------------
|
||||
|
||||
::
|
||||
|
||||
sudo docker -d &
|
||||
|
||||
|
||||
Run your first container!
|
||||
-------------------------
|
||||
|
||||
::
|
||||
|
||||
docker run -i -t ubuntu /bin/bash
|
||||
|
||||
|
||||
|
||||
Continue with the :ref:`hello_world` example.
|
||||
@@ -13,8 +13,9 @@ Contents:
   :maxdepth: 1

   ubuntulinux
   binaries
   archlinux
   macos
   vagrant
   windows
   amazon
   upgrading
@@ -1,66 +0,0 @@
|
||||
|
||||
Mac OS X and other linux
|
||||
========================
|
||||
|
||||
Please note this is a community contributed installation path. The only 'official' installation is using the :ref:`ubuntu_linux` installation path. This version
|
||||
may be out of date because it depends on some binaries to be updated and published
|
||||
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
We currently rely on some Ubuntu-linux specific packages, this will change in the future, but for now we provide a
|
||||
streamlined path to install Virtualbox with a Ubuntu 12.10 image using Vagrant.
|
||||
|
||||
1. Install virtualbox from https://www.virtualbox.org/ (or use your package manager)
|
||||
2. Install vagrant from http://www.vagrantup.com/ (or use your package manager)
|
||||
3. Install git if you had not installed it before, check if it is installed by running
|
||||
``git`` in a terminal window
|
||||
|
||||
We recommend having at least about 2Gb of free disk space and 2Gb RAM (or more).
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
1. Fetch the docker sources
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
git clone https://github.com/dotcloud/docker.git
|
||||
|
||||
2. Run vagrant from the sources directory
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
vagrant up
|
||||
|
||||
Vagrant will:
|
||||
|
||||
* Download the Quantal64 base ubuntu virtual machine image from get.docker.io/
|
||||
* Boot this image in virtualbox
|
||||
|
||||
Then it will use Puppet to perform an initial setup in this machine:
|
||||
|
||||
* Download & untar the most recent docker binary tarball to vagrant homedir.
|
||||
* Debootstrap to /var/lib/docker/images/ubuntu.
|
||||
* Install & run dockerd as service.
|
||||
* Put docker in /usr/local/bin.
|
||||
* Put latest Go toolchain in /usr/local/go.
|
||||
|
||||
You now have a Ubuntu Virtual Machine running with docker pre-installed.
|
||||
|
||||
To access the VM and use Docker, Run ``vagrant ssh`` from the same directory as where you ran
|
||||
``vagrant up``. Vagrant will make sure to connect you to the correct VM.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
vagrant ssh
|
||||
|
||||
Now you are in the VM, run docker
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker
|
||||
|
||||
|
||||
Continue with the :ref:`hello_world` example.
|
||||
@@ -6,51 +6,56 @@ Ubuntu Linux
|
||||
**Please note this project is currently under heavy development. It should not be used in production.**
|
||||
|
||||
|
||||
|
||||
Installing on Ubuntu 12.04 and 12.10
|
||||
|
||||
Right now, the officially supported distributions are:
|
||||
|
||||
* Ubuntu 12.04 (precise LTS)
|
||||
* Ubuntu 12.10 (quantal)
|
||||
- Ubuntu 12.04 (precise LTS) (64-bit)
|
||||
- Ubuntu 12.10 (quantal) (64-bit)
|
||||
|
||||
Install dependencies:
|
||||
---------------------
|
||||
Dependencies
|
||||
------------
|
||||
|
||||
::
|
||||
The linux-image-extra package is only needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
|
||||
|
||||
sudo apt-get install lxc bsdtar
|
||||
sudo apt-get install linux-image-extra-`uname -r`
|
||||
.. code-block:: bash
|
||||
|
||||
The linux-image-extra package is needed on standard Ubuntu EC2 AMIs in order to install the aufs kernel module.
|
||||
|
||||
Install the docker binary
|
||||
-------------------------
|
||||
|
||||
::
|
||||
|
||||
wget http://get.docker.io/builds/Linux/x86_64/docker-master.tgz
|
||||
tar -xf docker-master.tgz
|
||||
sudo cp ./docker-master /usr/local/bin
|
||||
|
||||
Note: docker currently only supports 64-bit Linux hosts.
|
||||
sudo apt-get install linux-image-extra-`uname -r`
|
||||
|
||||
|
||||
Run the docker daemon
|
||||
---------------------
|
||||
Installation
|
||||
------------
|
||||
|
||||
::
|
||||
|
||||
sudo docker -d &
|
||||
|
||||
Run your first container!
|
||||
-------------------------
|
||||
|
||||
::
|
||||
docker run -i -t ubuntu /bin/bash
|
||||
Docker is available as a Ubuntu PPA (Personal Package Archive),
|
||||
`hosted on launchpad <https://launchpad.net/~dotcloud/+archive/lxc-docker>`_
|
||||
which makes installing Docker on Ubuntu very easy.
|
||||
|
||||
|
||||
Check out more examples
|
||||
-----------------------
|
||||
|
||||
Continue with the :ref:`hello_world` example.
|
||||
Add the custom package sources to your apt sources list. Copy and paste the following lines at once.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo sh -c "echo 'deb http://ppa.launchpad.net/dotcloud/lxc-docker/ubuntu precise main' >> /etc/apt/sources.list"
|
||||
|
||||
|
||||
Update your sources. You will see a warning that GPG signatures cannot be verified.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo apt-get update
|
||||
|
||||
|
||||
Now install it, you will see another warning that the package cannot be authenticated. Confirm install.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo apt-get install lxc-docker
|
||||
|
||||
|
||||
Verify it worked
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker
|
||||
|
||||
|
||||
**Done!**, now continue with the :ref:`hello_world` example.
|
||||
|
||||
@@ -3,7 +3,8 @@
Upgrading
============

We assume you are upgrading from within the operating system which runs your docker daemon.
These instructions are for upgrading your Docker binary for when you had a custom (non package manager) installation.
If you installed docker using apt-get, use that to upgrade.


Get the latest docker binary:
70
docs/sources/installation/vagrant.rst
Normal file
70
docs/sources/installation/vagrant.rst
Normal file
@@ -0,0 +1,70 @@
|
||||
|
||||
.. _install_using_vagrant:
|
||||
|
||||
Using Vagrant
|
||||
=============
|
||||
|
||||
Please note this is a community contributed installation path. The only 'official' installation is using the
|
||||
:ref:`ubuntu_linux` installation path. This version may sometimes be out of date.
|
||||
|
||||
**Requirements:**
|
||||
This guide will setup a new virtual machine with docker installed on your computer. This works on most operating
|
||||
systems, including MacOX, Windows, Linux, FreeBSD and others. If you can install these and have at least 400Mb RAM
|
||||
to spare you should be good.
|
||||
|
||||
|
||||
Install Vagrant and Virtualbox
|
||||
------------------------------
|
||||
|
||||
1. Install virtualbox from https://www.virtualbox.org/ (or use your package manager)
|
||||
2. Install vagrant from http://www.vagrantup.com/ (or use your package manager)
|
||||
3. Install git if you had not installed it before, check if it is installed by running
|
||||
``git`` in a terminal window
|
||||
|
||||
|
||||
Spin it up
|
||||
----------
|
||||
|
||||
1. Fetch the docker sources (this includes the Vagrantfile for machine setup).
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
git clone https://github.com/dotcloud/docker.git
|
||||
|
||||
2. Run vagrant from the sources directory
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
vagrant up
|
||||
|
||||
Vagrant will:
|
||||
|
||||
* Download the 'official' Precise64 base ubuntu virtual machine image from vagrantup.com
|
||||
* Boot this image in virtualbox
|
||||
* Add the `Docker PPA sources <https://launchpad.net/~dotcloud/+archive/lxc-docker>`_ to /etc/apt/sources.lst
|
||||
* Update your sources
|
||||
* Install lxc-docker
|
||||
|
||||
You now have a Ubuntu Virtual Machine running with docker pre-installed.
|
||||
|
||||
Connect
|
||||
-------
|
||||
|
||||
To access the VM and use Docker, Run ``vagrant ssh`` from the same directory as where you ran
|
||||
``vagrant up``. Vagrant will connect you to the correct VM.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
vagrant ssh
|
||||
|
||||
Run
|
||||
-----
|
||||
|
||||
Now you are in the VM, run docker
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
docker
|
||||
|
||||
|
||||
Continue with the :ref:`hello_world` example.
|
||||
@@ -3,8 +3,8 @@
:keywords: Docker, Docker documentation, Windows, requirements, virtualbox, vagrant, git, ssh, putty, cygwin


Windows
=========
Windows (with Vagrant)
======================

Please note this is a community contributed installation path. The only 'official' installation is using the :ref:`ubuntu_linux` installation path. This version
may be out of date because it depends on some binaries to be updated and published
docs/sources/nginx.conf (new file, 6 lines)
@@ -0,0 +1,6 @@

# rule to redirect original links created when hosted on github pages
rewrite ^/documentation/(.*).html http://docs.docker.io/en/latest/$1/ permanent;

# rewrite the stuff which was on the current page
rewrite ^/gettingstarted.html$ /gettingstarted/ permanent;
getKernelVersion_darwin.go (new file, 9 lines)
@@ -0,0 +1,9 @@
package docker

import (
	"fmt"
)

func getKernelVersion() (*KernelVersionInfo, error) {
	return nil, fmt.Errorf("Kernel version detection is not available on darwin")
}
69
getKernelVersion_linux.go
Normal file
69
getKernelVersion_linux.go
Normal file
@@ -0,0 +1,69 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func getKernelVersion() (*KernelVersionInfo, error) {
|
||||
var (
|
||||
uts syscall.Utsname
|
||||
flavor string
|
||||
kernel, major, minor int
|
||||
err error
|
||||
)
|
||||
|
||||
if err := syscall.Uname(&uts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
release := make([]byte, len(uts.Release))
|
||||
|
||||
i := 0
|
||||
for _, c := range uts.Release {
|
||||
release[i] = byte(c)
|
||||
i++
|
||||
}
|
||||
|
||||
// Remove the \x00 from the release for Atoi to parse correctly
|
||||
release = release[:bytes.IndexByte(release, 0)]
|
||||
|
||||
tmp := strings.SplitN(string(release), "-", 2)
|
||||
tmp2 := strings.SplitN(tmp[0], ".", 3)
|
||||
|
||||
if len(tmp2) > 0 {
|
||||
kernel, err = strconv.Atoi(tmp2[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if len(tmp2) > 1 {
|
||||
major, err = strconv.Atoi(tmp2[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if len(tmp2) > 2 {
|
||||
minor, err = strconv.Atoi(tmp2[2])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if len(tmp) == 2 {
|
||||
flavor = tmp[1]
|
||||
} else {
|
||||
flavor = ""
|
||||
}
|
||||
|
||||
return &KernelVersionInfo{
|
||||
Kernel: kernel,
|
||||
Major: major,
|
||||
Minor: minor,
|
||||
Flavor: flavor,
|
||||
}, nil
|
||||
}
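The Linux implementation above fills a `KernelVersionInfo` with the `Kernel`, `Major`, `Minor`, and `Flavor` fields that the runtime reports through `docker version` and uses for the kernels-below-3.8 stability warning mentioned in the 0.1.8 changelog entry. A hypothetical comparison helper (not part of this diff) could look like:

```go
// Hypothetical helper: compare a detected kernel version against a minimum.
package docker

func kernelOlderThan(v *KernelVersionInfo, kernel, major int) bool {
	if v.Kernel != kernel {
		return v.Kernel < kernel
	}
	return v.Major < major
}

// Example use (sketch):
//   if v, err := getKernelVersion(); err == nil && kernelOlderThan(v, 3, 8) {
//       log.Printf("WARNING: kernel %d.%d.%d detected; docker may be unstable below 3.8", v.Kernel, v.Major, v.Minor)
//   }
```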
|
||||
27
graph.go
27
graph.go
@@ -2,6 +2,7 @@ package docker

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"
@@ -129,12 +130,32 @@ func (graph *Graph) Register(layerData Archive, img *Image) error {
	return nil
}

// TempLayerArchive creates a temporary archive of the given image's filesystem layer.
// The archive is stored on disk and will be automatically deleted as soon as has been read.
// If output is not nil, a human-readable progress bar will be written to it.
// FIXME: does this belong in Graph? How about MktempFile, let the caller use it for archives?
func (graph *Graph) TempLayerArchive(id string, compression Compression, output io.Writer) (*TempArchive, error) {
	image, err := graph.Get(id)
	if err != nil {
		return nil, err
	}
	tmp, err := graph.tmp()
	if err != nil {
		return nil, err
	}
	archive, err := image.TarLayer(compression)
	if err != nil {
		return nil, err
	}
	return NewTempArchive(ProgressReader(ioutil.NopCloser(archive), 0, output, "Buffering to disk %v/%v (%v)"), tmp.Root)
}

// Mktemp creates a temporary sub-directory inside the graph's filesystem.
func (graph *Graph) Mktemp(id string) (string, error) {
	if id == "" {
		id = GenerateId()
	}
	tmp, err := NewGraph(path.Join(graph.Root, ":tmp:"))
	tmp, err := graph.tmp()
	if err != nil {
		return "", fmt.Errorf("Couldn't create temp: %s", err)
	}
@@ -144,6 +165,10 @@ func (graph *Graph) Mktemp(id string) (string, error) {
	return tmp.imageRoot(id), nil
}

func (graph *Graph) tmp() (*Graph, error) {
	return NewGraph(path.Join(graph.Root, ":tmp:"))
}

// Check if given error is "not empty".
// Note: this is the way golang does it internally with os.IsNotExists.
func isNotEmpty(err error) bool {
17  hack/dockerbuilder/Dockerfile  Normal file
@@ -0,0 +1,17 @@
# This will build a container capable of producing an official binary build of docker and
# uploading it to S3
from ubuntu:12.10
run apt-get update
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q s3cmd
# Packages required to checkout and build docker
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q golang
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q git
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q build-essential
# Packages required to build an ubuntu package
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q debhelper
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q autotools-dev
copy fake_initctl /usr/local/bin/initctl
run DEBIAN_FRONTEND=noninteractive apt-get install -y -q devscripts
copy dockerbuilder /usr/local/bin/dockerbuilder
copy s3cfg /.s3cfg
# run $img dockerbuilder $REVISION_OR_TAG $S3_ID $S3_KEY
53  hack/dockerbuilder/dockerbuilder  Normal file
@@ -0,0 +1,53 @@
#!/bin/sh
set -x
set -e

PACKAGE=github.com/dotcloud/docker

if [ $# -gt 1 ]; then
	echo "Usage: $0 [REVISION]"
	exit 1
fi

export REVISION=$1

if [ -z "$AWS_ID" ]; then
	echo "Warning: environment variable AWS_ID is not set. Won't upload to S3."
	NO_S3=1
fi

if [ -z "$AWS_KEY" ]; then
	echo "Warning: environment variable AWS_KEY is not set. Won't upload to S3."
	NO_S3=1
fi

if [ -z "$GPG_KEY" ]; then
	echo "Warning: environment variable GPG_KEY is not set. Ubuntu package upload will not succeed."
	NO_UBUNTU=1
fi

if [ -z "$REVISION" ]; then
	rm -fr docker-master
	git clone https://github.com/dotcloud/docker docker-master
	cd docker-master
else
	rm -fr docker-$REVISION
	git init docker-$REVISION
	cd docker-$REVISION
	git fetch -t https://github.com/dotcloud/docker $REVISION
	git reset --hard FETCH_HEAD
fi

if [ -z "$REVISION" ]; then
	make release
else
	make release RELEASE_VERSION=$REVISION
fi

if [ -z "$NO_S3" ]; then
	s3cmd -P put docker-$REVISION.tgz s3://get.docker.io/builds/$(uname -s)/$(uname -m)/docker-$REVISION.tgz
fi

if [ -z "$NO_UBUNTU" ]; then
	(cd packaging/ubuntu && make ubuntu)
fi
3  hack/dockerbuilder/fake_initctl  Executable file
@@ -0,0 +1,3 @@
#!/bin/sh

echo Whatever you say, man
3  hack/dockerbuilder/s3cfg  Normal file
@@ -0,0 +1,3 @@
[default]
access_key = $AWS_ID
secret_key = $AWS_KEY
36  image.go
@@ -92,7 +92,7 @@ func MountAUFS(ro []string, rw string, target string) error {
	rwBranch := fmt.Sprintf("%v=rw", rw)
	roBranches := ""
	for _, layer := range ro {
		roBranches += fmt.Sprintf("%v=ro:", layer)
		roBranches += fmt.Sprintf("%v=ro+wh:", layer)
	}
	branches := fmt.Sprintf("br:%v:%v", rwBranch, roBranches)

@@ -110,6 +110,15 @@ func MountAUFS(ro []string, rw string, target string) error {
	return nil
}

// TarLayer returns a tar archive of the image's filesystem layer.
func (image *Image) TarLayer(compression Compression) (Archive, error) {
	layerPath, err := image.layer()
	if err != nil {
		return nil, err
	}
	return Tar(layerPath, compression)
}

func (image *Image) Mount(root, rw string) error {
	if mounted, err := Mounted(root); err != nil {
		return err
@@ -127,34 +136,9 @@ func (image *Image) Mount(root, rw string) error {
	if err := os.Mkdir(rw, 0755); err != nil && !os.IsExist(err) {
		return err
	}
	// FIXME: @creack shouldn't we do this after going over changes?
	if err := MountAUFS(layers, rw, root); err != nil {
		return err
	}
	// FIXME: Create tests for deletion
	// FIXME: move this part to change.go
	// Retrieve the changeset from the parent and apply it to the container
	// - Retrieve the changes
	changes, err := Changes(layers, layers[0])
	if err != nil {
		return err
	}
	// Iterate on changes
	for _, c := range changes {
		// If there is a delete
		if c.Kind == ChangeDelete {
			// Make sure the directory exists
			file_path, file_name := path.Dir(c.Path), path.Base(c.Path)
			if err := os.MkdirAll(path.Join(rw, file_path), 0755); err != nil {
				return err
			}
			// And create the whiteout (we just need to create empty file, discard the return)
			if _, err := os.Create(path.Join(path.Join(rw, file_path),
				".wh."+path.Base(file_name))); err != nil {
				return err
			}
		}
	}
	return nil
}
25  packaging/archlinux/README.archlinux  Normal file
@@ -0,0 +1,25 @@
Docker on Arch
==============

The AUR lxc-docker and lxc-docker-git packages handle building docker on Arch
linux. The PKGBUILD specifies all dependencies, build, and packaging steps.

Dependencies
============

The only buildtime dependencies are git and go which are available via pacman.
The -s flag can be used on makepkg commands below to automatically install
these dependencies.

Building Package
================

Download the tarball for either AUR packaged to a local directory. In that
directory makepkg can be run to build the package.

# Build the binary package
makepkg

# Build an updated source tarball
makepkg --source
@@ -1,30 +1,122 @@
lxc-docker (0.2.0-1) precise; urgency=low

- Runtime: ghost containers can be killed and waited for
- Documentation: update install intructions
- Packaging: fix Vagrantfile
- Development: automate releasing binaries and ubuntu packages
- Add a changelog
- Various bugfixes

-- dotCloud <ops@dotcloud.com> Mon, 23 Apr 2013 00:00:00 -0700


lxc-docker (0.1.8-1) precise; urgency=low

- Dynamically detect cgroup capabilities
- Issue stability warning on kernels <3.8
- 'docker push' buffers on disk instead of memory
- Fix 'docker diff' for removed files
- Fix 'docker stop' for ghost containers
- Fix handling of pidfile
- Various bugfixes and stability improvements

-- dotCloud <ops@dotcloud.com> Mon, 22 Apr 2013 00:00:00 -0700


lxc-docker (0.1.7-1) precise; urgency=low

- Container ports are available on localhost
- 'docker ps' shows allocated TCP ports
- Contributors can run 'make hack' to start a continuous integration VM
- Streamline ubuntu packaging & uploading
- Various bugfixes and stability improvements

-- dotCloud <ops@dotcloud.com> Thu, 18 Apr 2013 00:00:00 -0700


lxc-docker (0.1.6-1) precise; urgency=low

Improvements [+], Updates [*], Bug fixes [-]:
+ Multiple improvements, updates and bug fixes
- Record the author an image with 'docker commit -author'

-- dotCloud <ops@dotcloud.com> Wed, 17 Apr 2013 20:43:43 -0700
-- dotCloud <ops@dotcloud.com> Wed, 17 Apr 2013 00:00:00 -0700


lxc-docker (0.1.4.1-1) precise; urgency=low
lxc-docker (0.1.5-1) precise; urgency=low

Improvements [+], Updates [*], Bug fixes [-]:
* Test PPA
- Disable standalone mode
- Use a custom DNS resolver with 'docker -d -dns'
- Detect ghost containers
- Improve diagnosis of missing system capabilities
- Allow disabling memory limits at compile time
- Add debian packaging
- Documentation: installing on Arch Linux
- Documentation: running Redis on docker
- Fixed lxc 0.9 compatibility
- Automatically load aufs module
- Various bugfixes and stability improvements

-- dotCloud <ops@dotcloud.com> Mon, 15 Apr 2013 12:14:50 -0700
-- dotCloud <ops@dotcloud.com> Wed, 17 Apr 2013 00:00:00 -0700


lxc-docker (0.1.4-1) precise; urgency=low

Improvements [+], Updates [*], Bug fixes [-]:
* Changed default bridge interface do 'docker0'
- Fix a race condition when running the port allocator
- Full support for TTY emulation
- Detach from a TTY session with the escape sequence `C-p C-q`
- Various bugfixes and stability improvements
- Minor UI improvements
- Automatically create our own bridge interface 'docker0'

-- dotCloud <ops@dotcloud.com> Fri, 12 Apr 2013 12:20:06 -0700
-- dotCloud <ops@dotcloud.com> Tue, 9 Apr 2013 00:00:00 -0700


lxc-docker (0.1.0-1) unstable; urgency=low
lxc-docker (0.1.3-1) precise; urgency=low

* Initial release
- Choose TCP frontend port with '-p :PORT'
- Layer format is versioned
- Major reliability improvements to the process manager
- Various bugfixes and stability improvements

-- dotCloud <ops@dotcloud.com> Mon, 25 Mar 2013 05:51:12 -0700
-- dotCloud <ops@dotcloud.com> Thu, 4 Apr 2013 00:00:00 -0700


lxc-docker (0.1.2-1) precise; urgency=low

- Set container hostname with 'docker run -h'
- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]'
- Various bugfixes and stability improvements
- UI polish
- Progress bar on push/pull
- Use XZ compression by default
- Make IP allocator lazy

-- dotCloud <ops@dotcloud.com> Wed, 3 Apr 2013 00:00:00 -0700


lxc-docker (0.1.1-1) precise; urgency=low

- Display shorthand IDs for convenience
- Stabilize process management
- Layers can include a commit message
- Simplified 'docker attach'
- Fixed support for re-attaching
- Various bugfixes and stability improvements
- Auto-download at run
- Auto-login on push
- Beefed up documentation

-- dotCloud <ops@dotcloud.com> Sun, 31 Mar 2013 00:00:00 -0700


lxc-docker (0.1.0-1) precise; urgency=low

- First release
- Implement registry in order to push/pull images
- TCP port allocation
- Fix termcaps on Linux
- Add documentation
- Add Vagrant support with Vagrantfile
- Add unit tests
- Add repository/tags to ease image management
- Improve the layer implementation

-- dotCloud <ops@dotcloud.com> Sat, 23 Mar 2013 00:00:00 -0700
@@ -15,9 +15,12 @@ accessed adding the following line to /etc/apt/sources.list ::
Releasing a new package
~~~~~~~~~~~~~~~~~~~~~~~

The most relevant information to update is the changelog file:
The most relevant information to update is the packaging/ubuntu/changelog file:
Each new release should create a new first paragraph with new release version,
changes, and the maintainer information.
changes, and the maintainer information. The core of this paragraph is
located on CHANGELOG.md. Make sure to transcribe it and translate the formats
(eg: packaging/ubuntu/changelog uses 2 spaces for body change descriptions
instead of 1 space from CHANGELOG.md)

Assuming your PPA GPG signing key is on /media/usbdrive/docker.key, load it
into the GPG_KEY environment variable with::
@@ -28,8 +31,9 @@ into the GPG_KEY environment variable with::
After this is done and you are ready to upload the package to the PPA, you have
a couple of choices:

* Follow README.debian to generate the actual source packages and upload them
  to the PPA
* Follow packaging/ubuntu/README.ubuntu to generate the actual source packages
  and upload them to the PPA

* Let vagrant do all the work for you::

  ( cd docker/packaging/ubuntu; vagrant up )
23  registry.go
@@ -7,6 +7,7 @@ import (
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"path"
	"strings"
)
@@ -135,7 +136,7 @@ func (graph *Graph) getRemoteImage(stdout io.Writer, imgId string, authConfig *a
	if err != nil {
		return nil, nil, err
	}
	return img, ProgressReader(res.Body, int(res.ContentLength), stdout), nil
	return img, ProgressReader(res.Body, int(res.ContentLength), stdout, "Downloading %v/%v (%v)"), nil
}

func (graph *Graph) PullImage(stdout io.Writer, imgId string, authConfig *auth.AuthConfig) error {
@@ -269,24 +270,20 @@ func (graph *Graph) PushImage(stdout io.Writer, imgOrig *Image, authConfig *auth
		return fmt.Errorf("Failed to retrieve layer upload location: %s", err)
	}

	// FIXME: Don't do this :D. Check the S3 requierement and implement chunks of 5MB
	// FIXME2: I won't stress it enough, DON'T DO THIS! very high priority
	layerData2, err := Tar(path.Join(graph.Root, img.Id, "layer"), Xz)
	tmp, err := ioutil.ReadAll(layerData2)
	// FIXME: stream the archive directly to the registry instead of buffering it on disk. This requires either:
	// a) Implementing S3's proprietary streaming logic, or
	// b) Stream directly to the registry instead of S3.
	// I prefer option b. because it doesn't lock us into a proprietary cloud service.
	tmpLayer, err := graph.TempLayerArchive(img.Id, Xz, stdout)
	if err != nil {
		return err
	}
	layerLength := len(tmp)

	layerData, err := Tar(path.Join(graph.Root, img.Id, "layer"), Xz)
	if err != nil {
		return fmt.Errorf("Failed to generate layer archive: %s", err)
	}
	req3, err := http.NewRequest("PUT", url.String(), ProgressReader(layerData.(io.ReadCloser), layerLength, stdout))
	defer os.Remove(tmpLayer.Name())
	req3, err := http.NewRequest("PUT", url.String(), ProgressReader(tmpLayer, int(tmpLayer.Size), stdout, "Uploading %v/%v (%v)"))
	if err != nil {
		return err
	}
	req3.ContentLength = int64(layerLength)
	req3.ContentLength = int64(tmpLayer.Size)

	req3.TransferEncoding = []string{"none"}
	res3, err := client.Do(req3)
56  runtime.go
@@ -6,6 +6,7 @@ import (
	"github.com/dotcloud/docker/auth"
	"io"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"path"
@@ -14,6 +15,11 @@ import (
	"time"
)

type Capabilities struct {
	MemoryLimit bool
	SwapLimit   bool
}

type Runtime struct {
	root       string
	repository string
@@ -23,6 +29,8 @@ type Runtime struct {
	repositories *TagStore
	authConfig   *auth.AuthConfig
	idIndex      *TruncIndex
	capabilities *Capabilities
	kernelVersion *KernelVersionInfo
}

var sysInitPath string
@@ -176,12 +184,6 @@ func (runtime *Runtime) Register(container *Container) error {
		}
	}

	// If the container is not running or just has been flagged not running
	// then close the wait lock chan (will be reset upon start)
	if !container.State.Running {
		close(container.waitLock)
	}

	// Even if not running, we init the lock (prevents races in start/stop/kill)
	container.State.initLock()

@@ -199,6 +201,15 @@ func (runtime *Runtime) Register(container *Container) error {
	// done
	runtime.containers.PushBack(container)
	runtime.idIndex.Add(container.Id)

	// If the container is not running or just has been flagged not running
	// then close the wait lock chan (will be reset upon start)
	if !container.State.Running {
		close(container.waitLock)
	} else {
		container.allocateNetwork()
		go container.monitor()
	}
	return nil
}

@@ -282,7 +293,37 @@ func (runtime *Runtime) restore() error {

// FIXME: harmonize with NewGraph()
func NewRuntime() (*Runtime, error) {
	return NewRuntimeFromDirectory("/var/lib/docker")
	runtime, err := NewRuntimeFromDirectory("/var/lib/docker")
	if err != nil {
		return nil, err
	}

	if k, err := GetKernelVersion(); err != nil {
		log.Printf("WARNING: %s\n", err)
	} else {
		runtime.kernelVersion = k
		if CompareKernelVersion(k, &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
			log.Printf("WARNING: You are running linux kernel version %s, which might be unstable running docker. Please upgrade your kernel to 3.8.0.", k.String())
		}
	}

	if cgroupMemoryMountpoint, err := FindCgroupMountpoint("memory"); err != nil {
		log.Printf("WARNING: %s\n", err)
	} else {
		_, err1 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.limit_in_bytes"))
		_, err2 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.soft_limit_in_bytes"))
		runtime.capabilities.MemoryLimit = err1 == nil && err2 == nil
		if !runtime.capabilities.MemoryLimit {
			log.Printf("WARNING: Your kernel does not support cgroup memory limit.")
		}

		_, err = ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes"))
		runtime.capabilities.SwapLimit = err == nil
		if !runtime.capabilities.SwapLimit {
			log.Printf("WARNING: Your kernel does not support cgroup swap limit.")
		}
	}
	return runtime, nil
}

func NewRuntimeFromDirectory(root string) (*Runtime, error) {
@@ -321,6 +362,7 @@ func NewRuntimeFromDirectory(root string) (*Runtime, error) {
		repositories: repositories,
		authConfig:   authConfig,
		idIndex:      NewTruncIndex(),
		capabilities: &Capabilities{},
	}

	if err := runtime.restore(); err != nil {
@@ -48,8 +48,6 @@ func layerArchive(tarfile string) (io.Reader, error) {
}

func init() {
	NO_MEMORY_LIMIT = os.Getenv("NO_MEMORY_LIMIT") == "1"

	// Hack to run sys init during unit testing
	if SelfPath() == "/sbin/init" {
		SysInit()
@@ -275,7 +273,7 @@ func TestAllocatePortLocalhost(t *testing.T) {
		t.Fatal(err)
	}
	defer container.Kill()
	time.Sleep(300 * time.Millisecond) // Wait for the container to run
	time.Sleep(600 * time.Millisecond) // Wait for the container to run
	conn, err := net.Dial("tcp",
		fmt.Sprintf(
			"localhost:%s", container.NetworkSettings.PortMapping["5555"],
87  utils.go
@@ -12,6 +12,7 @@ import (
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"runtime"
	"strings"
	"sync"
@@ -69,23 +70,30 @@ type progressReader struct {
	readTotal    int    // Expected stream length (bytes)
	readProgress int    // How much has been read so far (bytes)
	lastUpdate   int    // How many bytes read at least update
	template     string // Template to print. Default "%v/%v (%v)"
}

func (r *progressReader) Read(p []byte) (n int, err error) {
	read, err := io.ReadCloser(r.reader).Read(p)
	r.readProgress += read

	// Only update progress for every 1% read
	updateEvery := int(0.01 * float64(r.readTotal))
	if r.readProgress-r.lastUpdate > updateEvery || r.readProgress == r.readTotal {
		fmt.Fprintf(r.output, "%d/%d (%.0f%%)\r",
			r.readProgress,
			r.readTotal,
			float64(r.readProgress)/float64(r.readTotal)*100)
	updateEvery := 4096
	if r.readTotal > 0 {
		// Only update progress for every 1% read
		if increment := int(0.01 * float64(r.readTotal)); increment > updateEvery {
			updateEvery = increment
		}
	}
	if r.readProgress-r.lastUpdate > updateEvery || err != nil {
		if r.readTotal > 0 {
			fmt.Fprintf(r.output, r.template+"\r", r.readProgress, r.readTotal, fmt.Sprintf("%.0f%%", float64(r.readProgress)/float64(r.readTotal)*100))
		} else {
			fmt.Fprintf(r.output, r.template+"\r", r.readProgress, "?", "n/a")
		}
		r.lastUpdate = r.readProgress
	}
	// Send newline when complete
	if err == io.EOF {
	if err != nil {
		fmt.Fprintf(r.output, "\n")
	}
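For reference, the new throttling above keeps a floor of 4096 bytes between progress updates and switches to 1% of the total stream length once that exceeds the floor. A minimal sketch of the same calculation (not part of the commits):

    package main

    import "fmt"

    // updateThreshold mirrors the logic in progressReader.Read above:
    // default to 4096 bytes, but for a known total use 1% of it when larger.
    func updateThreshold(readTotal int) int {
        updateEvery := 4096
        if readTotal > 0 {
            if increment := int(0.01 * float64(readTotal)); increment > updateEvery {
                updateEvery = increment
            }
        }
        return updateEvery
    }

    func main() {
        fmt.Println(updateThreshold(0))        // unknown length: 4096
        fmt.Println(updateThreshold(100000))   // 1% = 1000, below the floor, so 4096
        fmt.Println(updateThreshold(10000000)) // 1% = 100000
    }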
@@ -94,8 +102,11 @@ func (r *progressReader) Read(p []byte) (n int, err error) {
func (r *progressReader) Close() error {
	return io.ReadCloser(r.reader).Close()
}
func ProgressReader(r io.ReadCloser, size int, output io.Writer) *progressReader {
	return &progressReader{r, output, size, 0, 0}
func ProgressReader(r io.ReadCloser, size int, output io.Writer, template string) *progressReader {
	if template == "" {
		template = "%v/%v (%v)"
	}
	return &progressReader{r, output, size, 0, 0, template}
}
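A hedged usage sketch of the extended constructor (not part of the commits; the helper name is made up for illustration), written as if it lived in the same docker package:

    package docker

    import "io"

    // copyWithProgress is a hypothetical helper: it wraps src with the
    // extended ProgressReader so a "Downloading current/total (percent)"
    // line is refreshed on out while the stream is copied into dst.
    func copyWithProgress(dst io.Writer, src io.ReadCloser, size int, out io.Writer) (int64, error) {
        progress := ProgressReader(src, size, out, "Downloading %v/%v (%v)")
        defer progress.Close()
        return io.Copy(dst, progress)
    }

Passing size 0 falls back to the "?"/"n/a" branch shown in Read above, which is how TempLayerArchive uses it when the archive length is not known in advance.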
// HumanDuration returns a human-readable approximation of a duration
@@ -384,3 +395,59 @@ func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error)
	}
	return written, err
}

type KernelVersionInfo struct {
	Kernel int
	Major  int
	Minor  int
	Flavor string
}

// FIXME: this doens't build on Darwin
func GetKernelVersion() (*KernelVersionInfo, error) {
	return getKernelVersion()
}

func (k *KernelVersionInfo) String() string {
	return fmt.Sprintf("%d.%d.%d-%s", k.Kernel, k.Major, k.Minor, k.Flavor)
}

// Compare two KernelVersionInfo struct.
// Returns -1 if a < b, = if a == b, 1 it a > b
func CompareKernelVersion(a, b *KernelVersionInfo) int {
	if a.Kernel < b.Kernel {
		return -1
	} else if a.Kernel > b.Kernel {
		return 1
	}

	if a.Major < b.Major {
		return -1
	} else if a.Major > b.Major {
		return 1
	}

	if a.Minor < b.Minor {
		return -1
	} else if a.Minor > b.Minor {
		return 1
	}

	return 0
}

func FindCgroupMountpoint(cgroupType string) (string, error) {
	output, err := exec.Command("mount").CombinedOutput()
	if err != nil {
		return "", err
	}

	reg := regexp.MustCompile(`^.* on (.*) type cgroup \(.*` + cgroupType + `[,\)]`)
	for _, line := range strings.Split(string(output), "\n") {
		r := reg.FindStringSubmatch(line)
		if len(r) == 2 {
			return r[1], nil
		}
	}
	return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType)
}
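To make the mount parsing concrete, here is a standalone sketch (not part of the commits; the sample mount line is illustrative) that applies the same pattern FindCgroupMountpoint builds for the "memory" cgroup:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        // Hypothetical line from `mount` output, for illustration only.
        line := "cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory)"

        // Same pattern FindCgroupMountpoint builds when cgroupType = "memory".
        reg := regexp.MustCompile(`^.* on (.*) type cgroup \(.*memory[,\)]`)
        if m := reg.FindStringSubmatch(line); len(m) == 2 {
            fmt.Println(m[1]) // /sys/fs/cgroup/memory
        }
    }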
@@ -228,3 +228,36 @@ func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult strin
		t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult)
	}
}

func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) {
	if r := CompareKernelVersion(a, b); r != result {
		t.Fatalf("Unepected kernel version comparaison result. Found %d, expected %d", r, result)
	}
}

func TestCompareKernelVersion(t *testing.T) {
	assertKernelVersion(t,
		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
		0)
	assertKernelVersion(t,
		&KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0},
		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
		-1)
	assertKernelVersion(t,
		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
		&KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0},
		1)
	assertKernelVersion(t,
		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "0"},
		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "16"},
		0)
	assertKernelVersion(t,
		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 5},
		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
		1)
	assertKernelVersion(t,
		&KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20, Flavor: "25"},
		&KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "0"},
		-1)
}