Mirror of https://github.com/moby/moby.git (synced 2026-01-15 09:51:27 +00:00)

Compare commits: 131 commits
| SHA1 |
|---|
| c348c04fdf |
| 5ca0fedffc |
| 562a235763 |
| eb131250aa |
| 9ec29fddf8 |
| d015d31b1c |
| f6292ab8d0 |
| c3f2597546 |
| a4f3cd62a3 |
| 11d61ec77f |
| 40c90796e9 |
| 7fb55f7725 |
| 7a7c3d87d8 |
| 42df36b6fc |
| 0354c3a95c |
| 189c40597f |
| 98193a397e |
| c45cd9174e |
| ee65633620 |
| 3be2ea021d |
| 1d0d093d0b |
| 38a49951b3 |
| 772765c404 |
| 71e89fdc75 |
| 5530cc29b2 |
| 82aed48bbc |
| 0fd9c98de3 |
| 762a0cc472 |
| e24e9c09f8 |
| 93ff70a3e7 |
| c6350bcc24 |
| 9a6333466b |
| 187eed7da5 |
| 368e308971 |
| 149156a272 |
| 8298a200ce |
| c9cebf3a73 |
| 53a01aebd6 |
| 46c8b11f24 |
| 10b794b332 |
| a3ee36b747 |
| e525ad3f9b |
| 322a42b8a2 |
| 90ea81433f |
| d7f26c93d9 |
| d4e61b48eb |
| 1a6fc02348 |
| 010d74ec2f |
| 1137ecf7d1 |
| a25a80b2d3 |
| a95712899e |
| b57051a724 |
| bb76985d39 |
| 28ec47c441 |
| 94c803e9f0 |
| 13ebc68636 |
| d003cfea25 |
| f3103e5c91 |
| 0eb5f233d6 |
| ef7e000a13 |
| 811341423b |
| b3addb5fb8 |
| 00ee6d1925 |
| 6f8a79c23c |
| cf8063d152 |
| 3e10fe1a15 |
| 45ecdf9c8e |
| b942f24ba8 |
| 3779291e9b |
| fa14a1b983 |
| f9e14cc838 |
| f2ea539467 |
| 7c4e5fbd46 |
| 97ef8a067c |
| 4b9e475a3d |
| 1d0aeae339 |
| 859856b3e4 |
| b8b18a2b42 |
| 561d1db074 |
| 1f9abfe841 |
| 2b93f18223 |
| 8f3b8f3835 |
| fca83b4cfb |
| 444a087ac2 |
| e1c861cf33 |
| 6fe3da9924 |
| f43f3fa218 |
| 1e551c7cc5 |
| 2c395ce8fb |
| 7799ae27ca |
| bb754fd34d |
| a0298c0bd0 |
| a86a82cb7e |
| 36ab1836f9 |
| 5e8912e0e8 |
| 4e414f6205 |
| 4f31141e13 |
| 82531f7168 |
| ee6823d797 |
| 1363dfdd1d |
| b308e33106 |
| e5b09523dc |
| d44abae873 |
| 323c4b5211 |
| 5b4a0cac4e |
| 73294b6d56 |
| d6ca05f7cb |
| dfc2dc4d35 |
| fc559d9992 |
| ea762c1a51 |
| 599009191a |
| af50b2f17c |
| 2a1181f404 |
| f7afbf34fe |
| 3069bf9460 |
| 32b9a429c5 |
| 76910d16cc |
| 42c7dc448f |
| f560b87a86 |
| c561212b83 |
| 589515c717 |
| 523f726716 |
| 848f290012 |
| 9dd7ae4074 |
| c6dcee329d |
| 83d631b6a4 |
| 8b82b0dfe7 |
| 3264c1c5eb |
| b8cd2bc94d |
| 8f382aaecd |
| 821a82ac6c |
.travis.yml (new file, +23)

````
@@ -0,0 +1,23 @@
# Note: right now we don't use go-specific features of travis.
# Later we might automate "go test" etc. (or do it inside a docker container...?)

language: go

go: 1.2

# Disable the normal go build.
install: true

before_script:
- env | sort
- sudo apt-get update -qq
- sudo apt-get install -qq python-yaml
- git remote add upstream git://github.com/dotcloud/docker.git
- git fetch --append --no-tags upstream refs/heads/master:refs/remotes/upstream/master
# sometimes we have upstream master already as origin/master (PRs), but other times we don't, so let's just make sure we have a completely unambiguous way to specify "upstream master" from here out

script:
- hack/travis/dco.py
- hack/travis/gofmt.py

# vim:set sw=2 ts=2:
````
AUTHORS (2 changes)

````
@@ -48,7 +48,6 @@ Daniel YC Lin <dlin.tw@gmail.com>
Darren Coxall <darren@darrencoxall.com>
David Calavera <david.calavera@gmail.com>
David Sissitka <me@dsissitka.com>
Dinesh Subhraveti <dineshs@altiscale.com>
Deni Bertovic <deni@kset.org>
Dominik Honnef <dominik@honnef.co>
Don Spaulding <donspauldingii@gmail.com>
@@ -149,6 +148,7 @@ odk- <github@odkurzacz.org>
Pascal Borreli <pascal@borreli.com>
Paul Bowsher <pbowsher@globalpersonals.co.uk>
Paul Hammond <paul@paulhammond.org>
Paul Liétar <paul@lietar.net>
Paul Nasrat <pnasrat@gmail.com>
Phil Spitler <pspitler@gmail.com>
Piotr Bogdan <ppbogdan@gmail.com>
````
CHANGELOG.md (64 changes)

````
@@ -1,6 +1,68 @@
# Changelog

## 0.7.3 (2013-01-02)
## 0.7.5 (2014-01-09)

#### Builder

* Disable compression for build. More space usage but a much faster upload
- Fix ADD caching for certain paths
- Do not compress archive from git build

#### Documentation

- Fix error in GROUP add example
* Make sure the GPG fingerprint is inline in the documentation
* Give more specific advice on setting up signing of commits for DCO

#### Runtime

- Fix misspelled container names
- Do not add hostname when networking is disabled
* Return most recent image from the cache by date
- Return all errors from docker wait
* Add Content-Type Header "application/json" to GET /version and /info responses

#### Other

* Update DCO to version 1.1
+ Update Makefile to use "docker:GIT_BRANCH" as the generated image name
* Update Travis to check for new 1.1 DCO version

## 0.7.4 (2014-01-07)

#### Builder

- Fix ADD caching issue with . prefixed path
- Fix docker build on devicemapper by reverting sparse file tar option
- Fix issue with file caching and prevent wrong cache hit
* Use same error handling while unmarshalling CMD and ENTRYPOINT

#### Documentation

* Simplify and streamline Amazon Quickstart
* Install instructions use unprefixed fedora image
* Update instructions for mtu flag for Docker on GCE
+ Add Ubuntu Saucy to installation
- Fix for wrong version warning on master instead of latest

#### Runtime

- Only get the image's rootfs when we need to calculate the image size
- Correctly handle unmapping UDP ports
* Make CopyFileWithTar use a pipe instead of a buffer to save memory on docker build
- Fix login message to say pull instead of push
- Fix "docker load" help by removing "SOURCE" prompt and mentioning STDIN
* Make blank -H option default to the same as no -H was sent
* Extract cgroups utilities to own submodule

#### Other

+ Add Travis CI configuration to validate DCO and gofmt requirements
+ Add Developer Certificate of Origin Text
* Upgrade VBox Guest Additions
* Check standalone header when pinging a registry server

## 0.7.3 (2014-01-02)

#### Builder
````
````
@@ -105,17 +105,52 @@ name and email address match your git configuration. The AUTHORS file is
regenerated occasionally from the git commit history, so a mismatch may result
in your changes being overwritten.

### Approval
### Sign your work

Docker maintainers use LGTM (looks good to me) in comments on the code review
to indicate acceptance.
The sign-off is a simple line at the end of the explanation for the
patch, which certifies that you wrote it or otherwise have the right to
pass it on as an open-source patch. The rules are pretty simple: if you
can certify the below:

```
Docker Developer Grant and Certificate of Origin 1.1

By making a contribution to the Docker Project ("Project"), I represent and warrant that:

a. The contribution was created in whole or in part by me and I have the right to submit the contribution on my own behalf or on behalf of a third party who has authorized me to submit this contribution to the Project; or

b. The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right and authorization to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license) that I have identified in the contribution; or

c. The contribution was provided directly to me by some other person who represented and warranted (a) or (b) and I have not modified it.

d. I understand and agree that this Project and the contribution are publicly known and that a record of the contribution (including all personal information I submit with it, including my sign-off record) is maintained indefinitely and may be redistributed consistent with this Project or the open source license(s) involved.

```

then you just add a line to every git commit message:

Docker-DCO-1.1-Signed-off-by: Joe Smith <joe.smith@email.com> (github: github_handle)

using your real name (sorry, no pseudonyms or anonymous contributions.)

One way to automate this, is customise your get ``commit.template`` by adding
the following to your ``.git/hooks/prepare-commit-msg`` script (needs
``chmod 755 .git/hooks/prepare-commit-msg`` ) in the docker checkout:

```
#!/bin/sh
# Auto sign all commits to allow them to be used by the Docker project.
# see https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md#sign-your-work
#
GH_USER=$(git config --get github.user)
SOB=$(git var GIT_AUTHOR_IDENT | sed -n "s/^\(.*>\).*$/Docker-DCO-1.1-Signed-off-by: \1 \(github: $GH_USER\)/p")
grep -qs "^$SOB" "$1" || echo "\n$SOB" >> "$1"

```

If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.io)

A change requires LGTMs from an absolute majority of the maintainers of each
component affected. For example, if a change affects docs/ and registry/, it
needs an absolute majority from the maintainers of docs/ AND, separately, an
absolute majority of the maintainers of registry

For more details see [MAINTAINERS.md](hack/MAINTAINERS.md)

### How can I become a maintainer?
````
````
@@ -2,6 +2,7 @@ Solomon Hykes <solomon@dotcloud.com> (@shykes)
Guillaume Charmes <guillaume@dotcloud.com> (@creack)
Victor Vieux <victor@dotcloud.com> (@vieux)
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
.travis.yml: Tianon Gravi <admwiggin@gmail.com> (@tianon)
api.go: Victor Vieux <victor@dotcloud.com> (@vieux)
Dockerfile: Tianon Gravi <admwiggin@gmail.com> (@tianon)
Makefile: Tianon Gravi <admwiggin@gmail.com> (@tianon)
````
Makefile (10 changes)

````
@@ -1,6 +1,9 @@
.PHONY: all binary build cross default docs shell test

DOCKER_RUN_DOCKER := docker run -rm -i -t -privileged -e TESTFLAGS -v $(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles docker
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
DOCKER_IMAGE := docker:$(GIT_BRANCH)
DOCKER_DOCS_IMAGE := docker-docs:$(GIT_BRANCH)
DOCKER_RUN_DOCKER := docker run -rm -i -t -privileged -e TESTFLAGS -v $(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles "$(DOCKER_IMAGE)"

default: binary

@@ -14,7 +17,8 @@ cross: build
$(DOCKER_RUN_DOCKER) hack/make.sh binary cross

docs:
docker build -t docker-docs docs && docker run -p 8000:8000 docker-docs
docker build -rm -t "$(DOCKER_DOCS_IMAGE)" docs
docker run -rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)"

test: build
$(DOCKER_RUN_DOCKER) hack/make.sh test test-integration
@@ -23,7 +27,7 @@ shell: build
$(DOCKER_RUN_DOCKER) bash

build: bundles
docker build -t docker .
docker build -rm -t "$(DOCKER_IMAGE)" .

bundles:
mkdir bundles
````
REMOTE_TODO.md (new file, +46)

````
@@ -0,0 +1,46 @@
```
**GET**
send objects deprecate multi-stream
TODO "/events": getEvents, N
ok "/info": getInfo, 1
ok "/version": getVersion, 1
... "/images/json": getImagesJSON, N
TODO "/images/viz": getImagesViz, 0 yes
TODO "/images/search": getImagesSearch, N
#3490 "/images/{name:.*}/get": getImagesGet, 0
TODO "/images/{name:.*}/history": getImagesHistory, N
TODO "/images/{name:.*}/json": getImagesByName, 1
TODO "/containers/ps": getContainersJSON, N
TODO "/containers/json": getContainersJSON, 1
ok "/containers/{name:.*}/export": getContainersExport, 0
TODO "/containers/{name:.*}/changes": getContainersChanges, N
TODO "/containers/{name:.*}/json": getContainersByName, 1
TODO "/containers/{name:.*}/top": getContainersTop, N
#3512 "/containers/{name:.*}/attach/ws": wsContainersAttach, 0 yes

**POST**
TODO "/auth": postAuth, 0 yes
ok "/commit": postCommit, 0
TODO "/build": postBuild, 0 yes
TODO "/images/create": postImagesCreate, N yes yes (pull)
TODO "/images/{name:.*}/insert": postImagesInsert, N yes yes
TODO "/images/load": postImagesLoad, 1 yes (stdin)
TODO "/images/{name:.*}/push": postImagesPush, N yes
ok "/images/{name:.*}/tag": postImagesTag, 0
ok "/containers/create": postContainersCreate, 0
ok "/containers/{name:.*}/kill": postContainersKill, 0
#3476 "/containers/{name:.*}/restart": postContainersRestart, 0
ok "/containers/{name:.*}/start": postContainersStart, 0
ok "/containers/{name:.*}/stop": postContainersStop, 0
ok "/containers/{name:.*}/wait": postContainersWait, 0
ok "/containers/{name:.*}/resize": postContainersResize, 0
#3512 "/containers/{name:.*}/attach": postContainersAttach, 0 yes
TODO "/containers/{name:.*}/copy": postContainersCopy, 0 yes

**DELETE**
#3180 "/containers/{name:.*}": deleteContainers, 0
TODO "/images/{name:.*}": deleteImages, N

**OPTIONS**
ok "": optionsHandler, 0
```
````
Vagrantfile (vendored, 10 changes)

````
@@ -24,7 +24,7 @@ if [ -z "$user" ]; then
fi

# Adding an apt gpg key is idempotent.
wget -q -O - https://get.docker.io/gpg | apt-key add -
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9

# Creating the docker.list file is idempotent, but it may overwrite desired
# settings if it already exists. This could be solved with md5sum but it
@@ -70,7 +70,7 @@ SCRIPT
# trigger dkms to build the virtualbox guest module install.
$vbox_script = <<VBOX_SCRIPT + $script
# Install the VirtualBox guest additions if they aren't already installed.
if [ ! -d /opt/VBoxGuestAdditions-4.3.4/ ]; then
if [ ! -d /opt/VBoxGuestAdditions-4.3.6/ ]; then
# Update remote package metadata. 'apt-get update' is idempotent.
apt-get update -q
@@ -79,10 +79,10 @@ if [ ! -d /opt/VBoxGuestAdditions-4.3.4/ ]; then
apt-get install -q -y linux-headers-generic-lts-raring dkms

echo 'Downloading VBox Guest Additions...'
wget -cq http://dlc.sun.com.edgesuite.net/virtualbox/4.3.4/VBoxGuestAdditions_4.3.4.iso
echo "f120793fa35050a8280eacf9c930cf8d9b88795161520f6515c0cc5edda2fe8a VBoxGuestAdditions_4.3.4.iso" | sha256sum --check || exit 1
wget -cq http://dlc.sun.com.edgesuite.net/virtualbox/4.3.6/VBoxGuestAdditions_4.3.6.iso
echo "95648fcdb5d028e64145a2fe2f2f28c946d219da366389295a61fed296ca79f0 VBoxGuestAdditions_4.3.6.iso" | sha256sum --check || exit 1

mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.3.4.iso /mnt
mount -o loop,ro /home/vagrant/VBoxGuestAdditions_4.3.6.iso /mnt
/mnt/VBoxLinuxAdditions.run --nox11
umount /mnt
fi
````
api.go (4 changes)

````
@@ -140,6 +140,7 @@ func postAuth(srv *Server, version float64, w http.ResponseWriter, r *http.Request
}

func getVersion(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
w.Header().Set("Content-Type", "application/json")
srv.Eng.ServeHTTP(w, r)
return nil
}
@@ -216,6 +217,7 @@ func getImagesViz(srv *Server, version float64, w http.ResponseWriter, r *http.R
}

func getInfo(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
w.Header().Set("Content-Type", "application/json")
srv.Eng.ServeHTTP(w, r)
return nil
}
@@ -927,7 +929,7 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
return fmt.Errorf("Error trying to use git: %s (%s)", err, output)
}

c, err := archive.Tar(root, archive.Bzip2)
c, err := archive.Tar(root, archive.Uncompressed)
if err != nil {
return err
}
````
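The api.go hunks above only add a `Content-Type: application/json` header before handing the request to the engine. A minimal stdlib-only sketch of how such a header can be asserted with `httptest`; the handler below is an illustrative stand-in, not docker's real `getVersion`:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// getVersion is a stand-in handler: the relevant detail is that the header
// is set before any of the body is written, as in the hunk above.
func getVersion(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	fmt.Fprint(w, `{"Version":"0.7.5"}`)
}

func main() {
	rec := httptest.NewRecorder()
	getVersion(rec, httptest.NewRequest(http.MethodGet, "/version", nil))
	// Prints "application/json", mirroring the new integration-test checks.
	fmt.Println(rec.Header().Get("Content-Type"))
}
```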
````
@@ -3,8 +3,8 @@ package archive
import (
"archive/tar"
"bytes"
"compress/gzip"
"compress/bzip2"
"compress/gzip"
"fmt"
"github.com/dotcloud/docker/utils"
"io"
@@ -149,7 +149,7 @@ func escapeName(name string) string {
// Tar creates an archive from the directory at `path`, only including files whose relative
// paths are included in `filter`. If `filter` is nil, then all files are included.
func TarFilter(path string, options *TarOptions) (io.Reader, error) {
args := []string{"tar", "-S", "--numeric-owner", "-f", "-", "-C", path, "-T", "-"}
args := []string{"tar", "--numeric-owner", "-f", "-", "-C", path, "-T", "-"}
if options.Includes == nil {
options.Includes = []string{"."}
}
@@ -228,7 +228,7 @@ func Untar(archive io.Reader, path string, options *TarOptions) error {
compression := DetectCompression(buf)

utils.Debugf("Archive compression detected: %s", compression.Extension())
args := []string{"-S", "--numeric-owner", "-f", "-", "-C", path, "-x" + compression.Flag()}
args := []string{"--numeric-owner", "-f", "-", "-C", path, "-x" + compression.Flag()}

if options != nil {
for _, exclude := range options.Excludes {
@@ -299,7 +299,7 @@ func CopyWithTar(src, dst string) error {
//
// If `dst` ends with a trailing slash '/', the final destination path
// will be `dst/base(src)`.
func CopyFileWithTar(src, dst string) error {
func CopyFileWithTar(src, dst string) (err error) {
utils.Debugf("CopyFileWithTar(%s, %s)", src, dst)
srcSt, err := os.Stat(src)
if err != nil {
@@ -316,25 +316,38 @@ func CopyFileWithTar(src, dst string) error {
if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
return err
}
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
hdr, err := tar.FileInfoHeader(srcSt, "")
if err != nil {
return err
}
hdr.Name = filepath.Base(dst)
if err := tw.WriteHeader(hdr); err != nil {
return err
}
srcF, err := os.Open(src)
if err != nil {
return err
}
if _, err := io.Copy(tw, srcF); err != nil {
return err
}
tw.Close()
return Untar(buf, filepath.Dir(dst), nil)

r, w := io.Pipe()
errC := utils.Go(func() error {
defer w.Close()

srcF, err := os.Open(src)
if err != nil {
return err
}
defer srcF.Close()

tw := tar.NewWriter(w)
hdr, err := tar.FileInfoHeader(srcSt, "")
if err != nil {
return err
}
hdr.Name = filepath.Base(dst)
if err := tw.WriteHeader(hdr); err != nil {
return err
}
if _, err := io.Copy(tw, srcF); err != nil {
return err
}
tw.Close()
return nil
})
defer func() {
if er := <-errC; err != nil {
err = er
}
}()
return Untar(r, filepath.Dir(dst), nil)
}

// CmdStream executes a command, and returns its stdout as a stream.
````
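The `CopyFileWithTar` rewrite above replaces a `bytes.Buffer` with an `io.Pipe` plus a goroutine, so the tar stream is consumed while it is produced instead of being held in memory. A self-contained sketch of the same pattern using only the standard library; `tarFileToPipe` and the error-channel wiring are illustrative stand-ins for docker's `utils.Go` helper:

```go
package main

import (
	"archive/tar"
	"io"
	"os"
)

// tarFileToPipe streams a single file as a one-entry tar archive through an
// io.Pipe. The producer runs in a goroutine; its error is reported on errC.
func tarFileToPipe(src, name string) (io.Reader, <-chan error) {
	r, w := io.Pipe()
	errC := make(chan error, 1)
	go func() {
		errC <- func() error {
			defer w.Close()
			st, err := os.Stat(src)
			if err != nil {
				return err
			}
			f, err := os.Open(src)
			if err != nil {
				return err
			}
			defer f.Close()
			tw := tar.NewWriter(w)
			hdr, err := tar.FileInfoHeader(st, "")
			if err != nil {
				return err
			}
			hdr.Name = name
			if err := tw.WriteHeader(hdr); err != nil {
				return err
			}
			if _, err := io.Copy(tw, f); err != nil {
				return err
			}
			return tw.Close()
		}()
	}()
	return r, errC
}

func main() {
	r, errC := tarFileToPipe("/etc/hostname", "hostname")
	// The consumer (Untar, in docker's case) reads as the producer writes.
	if _, err := io.Copy(io.Discard, r); err != nil {
		panic(err)
	}
	if err := <-errC; err != nil {
		panic(err)
	}
}
```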
buildfile.go (78 changes)

````
@@ -213,16 +213,30 @@ func (b *buildFile) CmdEnv(args string) error {
return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", replacedVar))
}

func (b *buildFile) CmdCmd(args string) error {
func (b *buildFile) buildCmdFromJson(args string) []string {
var cmd []string
if err := json.Unmarshal([]byte(args), &cmd); err != nil {
utils.Debugf("Error unmarshalling: %s, setting cmd to /bin/sh -c", err)
utils.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err)
cmd = []string{"/bin/sh", "-c", args}
}
if err := b.commit("", cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
return cmd
}

func (b *buildFile) CmdCmd(args string) error {
cmd := b.buildCmdFromJson(args)
b.config.Cmd = cmd
if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
return err
}
return nil
}

func (b *buildFile) CmdEntrypoint(args string) error {
entrypoint := b.buildCmdFromJson(args)
b.config.Entrypoint = entrypoint
if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil {
return err
}
b.config.Cmd = cmd
return nil
}

@@ -245,23 +259,6 @@ func (b *buildFile) CmdCopy(args string) error {
return fmt.Errorf("COPY has been deprecated. Please use ADD instead")
}

func (b *buildFile) CmdEntrypoint(args string) error {
if args == "" {
return fmt.Errorf("Entrypoint cannot be empty")
}

var entrypoint []string
if err := json.Unmarshal([]byte(args), &entrypoint); err != nil {
b.config.Entrypoint = []string{"/bin/sh", "-c", args}
} else {
b.config.Entrypoint = entrypoint
}
if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %s", args)); err != nil {
return err
}
return nil
}

func (b *buildFile) CmdWorkdir(workdir string) error {
b.config.WorkingDir = workdir
return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
@@ -356,8 +353,9 @@ func (b *buildFile) CmdAdd(args string) error {

// FIXME: do we really need this?
var (
origPath = orig
destPath = dest
origPath = orig
destPath = dest
remoteHash string
)

if utils.IsURL(orig) {
@@ -376,11 +374,20 @@ func (b *buildFile) CmdAdd(args string) error {
}
defer os.RemoveAll(tmpDirName)
if _, err = io.Copy(tmpFile, resp.Body); err != nil {
tmpFile.Close()
return err
}
origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
tmpFile.Close()

// Process the checksum
r, err := archive.Tar(tmpFileName, archive.Uncompressed)
if err != nil {
return err
}
tarSum := utils.TarSum{Reader: r, DisableCompression: true}
remoteHash = tarSum.Sum(nil)

// If the destination is a directory, figure out the filename.
if strings.HasSuffix(dest, "/") {
u, err := url.Parse(orig)
@@ -410,12 +417,17 @@ func (b *buildFile) CmdAdd(args string) error {
hash string
sums = b.context.GetSums()
)
if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil {

if remoteHash != "" {
hash = remoteHash
} else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil {
return err
} else if fi.IsDir() {
var subfiles []string
for file, sum := range sums {
if strings.HasPrefix(file, origPath) {
absFile := path.Join(b.contextPath, file)
absOrigPath := path.Join(b.contextPath, origPath)
if strings.HasPrefix(absFile, absOrigPath) {
subfiles = append(subfiles, sum)
}
}
@@ -424,14 +436,21 @@ func (b *buildFile) CmdAdd(args string) error {
hasher.Write([]byte(strings.Join(subfiles, ",")))
hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
} else {
hash = "file:" + sums[origPath]
if origPath[0] == '/' && len(origPath) > 1 {
origPath = origPath[1:]
}
origPath = strings.TrimPrefix(origPath, "./")
if h, ok := sums[origPath]; ok {
hash = "file:" + h
}
}
b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", hash, dest)}
hit, err := b.probeCache()
if err != nil {
return err
}
if hit {
// If we do not have a hash, never use the cache
if hit && hash != "" {
return nil
}
}
@@ -597,11 +616,12 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
if err != nil {
return "", err
}
b.context = &utils.TarSum{Reader: context}
b.context = &utils.TarSum{Reader: context, DisableCompression: true}
if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
return "", err
}
defer os.RemoveAll(tmpdirPath)

b.contextPath = tmpdirPath
filename := path.Join(tmpdirPath, "Dockerfile")
if _, err := os.Stat(filename); os.IsNotExist(err) {
@@ -653,7 +673,7 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
}
return b.image, nil
}
return "", fmt.Errorf("An error occurred during the build\n")
return "", fmt.Errorf("No image was generated. This may be because the Dockerfile does not, like, do anything.\n")
}

func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *auth.AuthConfig) BuildFile {
````
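The new `buildCmdFromJson` helper gives CMD and ENTRYPOINT one shared rule: try to decode the arguments as a JSON array (exec form), and fall back to wrapping the raw string in `/bin/sh -c` (shell form). A minimal sketch of that rule in isolation; the function name here is illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// cmdFromDockerfileArgs decodes a Dockerfile instruction argument either as a
// JSON array or, failing that, as a shell-form string wrapped in /bin/sh -c.
func cmdFromDockerfileArgs(args string) []string {
	var cmd []string
	if err := json.Unmarshal([]byte(args), &cmd); err != nil {
		cmd = []string{"/bin/sh", "-c", args}
	}
	return cmd
}

func main() {
	fmt.Println(cmdFromDockerfileArgs(`["/bin/echo", "hi"]`)) // [/bin/echo hi]
	fmt.Println(cmdFromDockerfileArgs(`echo hi`))             // [/bin/sh -c echo hi]
}
```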
cgroups/cgroups.go (new file, +101)

````
@@ -0,0 +1,101 @@
package cgroups

import (
"bufio"
"fmt"
"github.com/dotcloud/docker/mount"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
)

// https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt

func FindCgroupMountpoint(subsystem string) (string, error) {
mounts, err := mount.GetMounts()
if err != nil {
return "", err
}

for _, mount := range mounts {
if mount.Fstype == "cgroup" {
for _, opt := range strings.Split(mount.VfsOpts, ",") {
if opt == subsystem {
return mount.Mountpoint, nil
}
}
}
}

return "", fmt.Errorf("cgroup mountpoint not found for %s", subsystem)
}

// Returns the relative path to the cgroup docker is running in.
func getThisCgroupDir(subsystem string) (string, error) {
f, err := os.Open("/proc/self/cgroup")
if err != nil {
return "", err
}
defer f.Close()

return parseCgroupFile(subsystem, f)
}

func parseCgroupFile(subsystem string, r io.Reader) (string, error) {
s := bufio.NewScanner(r)

for s.Scan() {
if err := s.Err(); err != nil {
return "", err
}
text := s.Text()
parts := strings.Split(text, ":")
if parts[1] == subsystem {
return parts[2], nil
}
}
return "", fmt.Errorf("cgroup '%s' not found in /proc/self/cgroup", subsystem)
}

// Returns a list of pids for the given container.
func GetPidsForContainer(id string) ([]int, error) {
pids := []int{}

// memory is chosen randomly, any cgroup used by docker works
subsystem := "memory"

cgroupRoot, err := FindCgroupMountpoint(subsystem)
if err != nil {
return pids, err
}

cgroupDir, err := getThisCgroupDir(subsystem)
if err != nil {
return pids, err
}

filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks")
if _, err := os.Stat(filename); os.IsNotExist(err) {
// With more recent lxc versions use, cgroup will be in lxc/
filename = filepath.Join(cgroupRoot, cgroupDir, "lxc", id, "tasks")
}

output, err := ioutil.ReadFile(filename)
if err != nil {
return pids, err
}
for _, p := range strings.Split(string(output), "\n") {
if len(p) == 0 {
continue
}
pid, err := strconv.Atoi(p)
if err != nil {
return pids, fmt.Errorf("Invalid pid '%s': %s", p, err)
}
pids = append(pids, pid)
}
return pids, nil
}
````
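`FindCgroupMountpoint` above walks the mount table through docker's own `mount` package. As a rough stdlib-only approximation of the same idea, one can scan `/proc/mounts` for a `cgroup` filesystem whose options mention the wanted subsystem; this is a simplification and not the code the package actually uses:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// findCgroupMountpoint returns the mountpoint of the cgroup hierarchy that
// carries the given subsystem (e.g. "memory"), by scanning /proc/mounts.
func findCgroupMountpoint(subsystem string) (string, error) {
	f, err := os.Open("/proc/mounts")
	if err != nil {
		return "", err
	}
	defer f.Close()

	s := bufio.NewScanner(f)
	for s.Scan() {
		// /proc/mounts fields: device mountpoint fstype options dump pass
		fields := strings.Fields(s.Text())
		if len(fields) < 4 || fields[2] != "cgroup" {
			continue
		}
		for _, opt := range strings.Split(fields[3], ",") {
			if opt == subsystem {
				return fields[1], nil
			}
		}
	}
	if err := s.Err(); err != nil {
		return "", err
	}
	return "", fmt.Errorf("cgroup mountpoint not found for %s", subsystem)
}

func main() {
	mp, err := findCgroupMountpoint("memory")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("memory cgroup mounted at", mp)
}
```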
cgroups/cgroups_test.go (new file, +27)

````
@@ -0,0 +1,27 @@
package cgroups

import (
"bytes"
"testing"
)

const (
cgroupsContents = `11:hugetlb:/
10:perf_event:/
9:blkio:/
8:net_cls:/
7:freezer:/
6:devices:/
5:memory:/
4:cpuacct,cpu:/
3:cpuset:/
2:name=systemd:/user.slice/user-1000.slice/session-16.scope`
)

func TestParseCgroups(t *testing.T) {
r := bytes.NewBuffer([]byte(cgroupsContents))
_, err := parseCgroupFile("blkio", r)
if err != nil {
t.Fatal(err)
}
}
````
commands.go (10 changes)

````
@@ -1100,7 +1100,7 @@ func (cli *DockerCli) CmdPull(args ...string) error {

if err := pull(authConfig); err != nil {
if err.Error() == registry.ErrLoginRequired.Error() {
fmt.Fprintln(cli.out, "\nPlease login prior to push:")
fmt.Fprintln(cli.out, "\nPlease login prior to pull:")
if err := cli.CmdLogin(endpoint); err != nil {
return err
}
@@ -2207,7 +2207,7 @@ func (cli *DockerCli) CmdSave(args ...string) error {
}

func (cli *DockerCli) CmdLoad(args ...string) error {
cmd := cli.Subcmd("load", "SOURCE", "Load an image from a tar archive")
cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN")
if err := cmd.Parse(args); err != nil {
return err
}
@@ -2510,11 +2510,7 @@ func (cli *DockerCli) LoadConfigFile() (err error) {
func waitForExit(cli *DockerCli, containerId string) (int, error) {
body, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil)
if err != nil {
// If we can't connect, then the daemon probably died.
if err != ErrConnectionRefused {
return -1, err
}
return -1, nil
return -1, err
}

var out APIWait
````
````
@@ -120,7 +120,7 @@ type BindMap struct {
}

var (
ErrContainerStart = errors.New("The container failed to start. Unkown error")
ErrContainerStart = errors.New("The container failed to start. Unknown error")
ErrContainerStartTimeout = errors.New("The container failed to start due to timed out.")
ErrInvalidWorikingDirectory = errors.New("The working directory is invalid. It needs to be an absolute path.")
ErrConflictAttachDetach = errors.New("Conflicting options: -a and -d")
@@ -1044,7 +1044,7 @@ ff02::2 ip6-allrouters

if container.Config.Domainname != "" {
hostsContent = append([]byte(fmt.Sprintf("%s\t%s.%s %s\n", IP, container.Config.Hostname, container.Config.Domainname, container.Config.Hostname)), hostsContent...)
} else {
} else if !container.Config.NetworkDisabled {
hostsContent = append([]byte(fmt.Sprintf("%s\t%s\n", IP, container.Config.Hostname)), hostsContent...)
}
````

````
@@ -56,7 +56,7 @@ func main() {

// Check that the requested process manager is supported
if _, exists := templates[*kind]; !exists {
panic("Unkown script template")
panic("Unknown script template")
}

// Load the requested template
````
````
@@ -125,12 +125,14 @@ Check the logs make sure it is working correctly.

.. code-block:: bash

sudo docker attach $CONTAINER_ID
sudo docker attach -sig-proxy=false $CONTAINER_ID

Attach to the container to see the results in real-time.

- **"docker attach**" This will allow us to attach to a background
process to see what is going on.
- **"-sig-proxy=false"** Do not forward signals to the container; allows
us to exit the attachment using Control-C without stopping the container.
- **$CONTAINER_ID** The Id of the container we want to attach too.

Exit from the container attachment by pressing Control-C.
````

````
@@ -26,18 +26,13 @@ Amazon QuickStart
<https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:>`_ menu
on your AWS Console.

* When picking the source AMI for your instance type, select "Community
AMIs".
* Click the ``Select`` button for a 64Bit Ubuntu image. For example: Ubuntu Server 12.04.3 LTS

* Search for ``amd64 precise``. Pick one of the amd64 Ubuntu images.

* If you choose a EBS enabled AMI, you'll also be able to launch a
* For testing you can use the default (possibly free)
``t1.micro`` instance (more info on `pricing
<http://aws.amazon.com/en/ec2/pricing/>`_). ``t1.micro`` instances are
eligible for Amazon's Free Usage Tier.
<http://aws.amazon.com/en/ec2/pricing/>`_).

* When you click select you'll be taken to the instance setup, and you're one
click away from having your Ubuntu VM up and running.
* Click the ``Next: Configure Instance Details`` button at the bottom right.

2. **Tell CloudInit to install Docker:**
````

````
@@ -67,7 +67,7 @@ Now let's verify that Docker is working.

.. code-block:: bash

sudo docker run -i -t mattdm/fedora /bin/bash
sudo docker run -i -t fedora /bin/bash

**Done!**, now continue with the :ref:`hello_world` example.
````

````
@@ -57,12 +57,13 @@
docker-playground:~$ curl get.docker.io | bash
docker-playground:~$ sudo update-rc.d docker defaults

7. If running in zones: us-central1-a, europe-west1-1, and europe-west1-b, the docker daemon must be started with the `-mtu` flag. Without the flag, you may experience intermittent network pauses.
7. If running in zones: ``us-central1-a``, ``europe-west1-1``, and ``europe-west1-b``, the docker daemon must be started with the ``-mtu`` flag. Without the flag, you may experience intermittent network pauses.
`See this issue <https://code.google.com/p/google-compute-engine/issues/detail?id=57>`_ for more details.

.. code-block:: bash

docker -d -mtu 1460
docker-playground:~$ echo 'DOCKER_OPTS="$DOCKER_OPTS -mtu 1460"' | sudo tee -a /etc/default/docker
docker-playground:~$ sudo service docker restart

8. Start a new container:
````

````
@@ -65,7 +65,7 @@ Now let's verify that Docker is working.

.. code-block:: bash

sudo docker run -i -t mattdm/fedora /bin/bash
sudo docker run -i -t fedora /bin/bash

**Done!**, now continue with the :ref:`hello_world` example.
````
````
@@ -17,7 +17,7 @@ Ubuntu
Docker is supported on the following versions of Ubuntu:

- :ref:`ubuntu_precise`
- :ref:`ubuntu_raring`
- :ref:`ubuntu_raring_saucy`

Please read :ref:`ufw`, if you plan to use `UFW (Uncomplicated
Firewall) <https://help.ubuntu.com/community/UFW>`_
@@ -68,13 +68,11 @@ easy. **See the :ref:`installmirrors` section below if you are not in
the United States.** Other sources of the Debian packages may be
faster for you to install.

First add the Docker repository key to your local keychain. You can use the
``apt-key`` command to check the fingerprint matches: ``36A1 D786 9245 C895 0F96
6E92 D857 6A8B A88D 21E9``
First add the Docker repository key to your local keychain.

.. code-block:: bash

sudo sh -c "wget -qO- https://get.docker.io/gpg | apt-key add -"
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9

Add the Docker repository to your apt sources list, update and install the
``lxc-docker`` package.
@@ -108,10 +106,12 @@ Type ``exit`` to exit

**Done!**, now continue with the :ref:`hello_world` example.

.. _ubuntu_raring:
.. _ubuntu_raring_saucy:

Ubuntu Raring 13.04 (64 bit)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Ubuntu Raring 13.04 and Saucy 13.10 (64 bit)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

These instructions cover both Ubuntu Raring 13.04 and Saucy 13.10.

Dependencies
------------
@@ -140,13 +140,11 @@ Docker is available as a Debian package, which makes installation easy.
Please note that these instructions have changed for 0.6. If you are upgrading from an earlier version, you will need
to follow them again.

First add the Docker repository key to your local keychain. You can use the
``apt-key`` command to check the fingerprint matches: ``36A1 D786 9245 C895 0F96
6E92 D857 6A8B A88D 21E9``
First add the Docker repository key to your local keychain.

.. code-block:: bash

sudo sh -c "wget -qO- https://get.docker.io/gpg | apt-key add -"
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9

Add the Docker repository to your apt sources list, update and install the
``lxc-docker`` package.
@@ -169,7 +167,6 @@ Type ``exit`` to exit

**Done!**, now continue with the :ref:`hello_world` example.


.. _ufw:

Docker and UFW
````

````
@@ -26,7 +26,7 @@ use ``apt-get`` to upgrade.
.. code-block:: bash

# Add the Docker repository key to your local keychain
sudo sh -c "curl https://get.docker.io/gpg | apt-key add -"
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9

# Add the Docker repository to your apt sources list.
sudo sh -c "echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list"

@@ -78,11 +78,11 @@ client commands.
# Add the docker group if it doesn't already exist.
sudo groupadd docker

# Add the connected user "${USERNAME}" to the docker group.
# Add the connected user "${USER}" to the docker group.
# Change the user name to match your preferred user.
# You may have to logout and log back in again for
# this to take effect.
sudo gpasswd -a ${USERNAME} docker
sudo gpasswd -a ${USER} docker

# Restart the docker daemon.
sudo service docker restart
@@ -117,6 +117,11 @@ For example:
* ``tcp://host:4243`` -> tcp connection on host:4243
* ``unix://path/to/socket`` -> unix socket located at ``path/to/socket``

``-H``, when empty, will default to the same value as when no ``-H`` was passed in.

``-H`` also accepts short form for TCP bindings:
``host[:port]`` or ``:port``

.. code-block:: bash

# Run docker in daemon mode
````

````
@@ -82,7 +82,7 @@ In this scenario:
$ sudo ifconfig bridge0 192.168.227.1 netmask 255.255.255.0

# Edit your Docker startup file
$ echo "DOCKER_OPTS=\"-b=bridge0\"" /etc/default/docker
$ echo "DOCKER_OPTS=\"-b=bridge0\"" >> /etc/default/docker

# Start Docker
$ sudo service docker start
````
docs/theme/docker/static/js/docs.js (vendored, 2 changes)

````
@@ -92,7 +92,7 @@ $(function(){
$('.version-flyer ul').html('<li class="alternative active-slug"><a href="" title="Switch to local">Local</a></li>');
}

if (doc_version == "master") {
if (doc_version == "latest") {
$('.version-flyer .version-note').hide();
}
````

````
@@ -1,8 +1,8 @@
package engine

import (
"path"
"net/http"
"path"
)

// ServeHTTP executes a job as specified by the http request `r`, and sends the
@@ -22,7 +22,7 @@ func (eng *Engine) ServeHTTP(w http.ResponseWriter, r *http.Request) {
jobArgs = []string{}
}
w.Header().Set("Job-Name", jobName)
for _, arg := range(jobArgs) {
for _, arg := range jobArgs {
w.Header().Add("Job-Args", arg)
}
job := eng.Job(jobName, jobArgs...)
````
graph.go (10 changes)

````
@@ -87,17 +87,17 @@ func (graph *Graph) Get(name string) (*Image, error) {
if err != nil {
return nil, err
}
// Check that the filesystem layer exists
rootfs, err := graph.driver.Get(img.ID)
if err != nil {
return nil, fmt.Errorf("Driver %s failed to get image rootfs %s: %s", graph.driver, img.ID, err)
}
if img.ID != id {
return nil, fmt.Errorf("Image stored at '%s' has wrong id '%s'", id, img.ID)
}
img.graph = graph

if img.Size < 0 {
rootfs, err := graph.driver.Get(img.ID)
if err != nil {
return nil, fmt.Errorf("Driver %s failed to get image rootfs %s: %s", graph.driver, img.ID, err)
}

var size int64
if img.Parent == "" {
if size, err = utils.TreeSize(rootfs); err != nil {
````

````
@@ -36,9 +36,8 @@ func (d *Driver) Cleanup() error {
}

func copyDir(src, dst string) error {
cmd := exec.Command("cp", "-aT", "--reflink=auto", src, dst)
if err := cmd.Run(); err != nil {
return err
if output, err := exec.Command("cp", "-aT", "--reflink=auto", src, dst).CombinedOutput(); err != nil {
return fmt.Errorf("Error VFS copying directory: %s (%s)", err, output)
}
return nil
}
````
````
@@ -1,3 +1,4 @@
# Release Checklist
## A maintainer's guide to releasing Docker

So you're in charge of a Docker release? Cool. Here's what to do.
@@ -8,9 +9,10 @@ to keep it up-to-date.
### 1. Pull from master and create a release branch

```bash
export VERSION=vXXX
export VERSION=vX.Y.Z
git checkout release
git pull
git fetch
git reset --hard origin/release
git checkout -b bump_$VERSION
git merge origin/master
```
@@ -20,16 +22,13 @@ git merge origin/master
You can run this command for reference:

```bash
LAST_VERSION=$(git tag | grep -E "v[0-9\.]+$" | sort -nr | head -n 1)
git log $LAST_VERSION..HEAD
LAST_VERSION=$(git tag | grep -E 'v[0-9\.]+$' | sort -nr | head -n 1)
git log --stat $LAST_VERSION..HEAD
```

Each change should be formatted as ```BULLET CATEGORY: DESCRIPTION```
Each change should be listed under a category heading formatted as `#### CATEGORY`.

* BULLET is either ```-```, ```+``` or ```*```, to indicate a bugfix,
new feature or upgrade, respectively.

* CATEGORY should describe which part of the project is affected.
`CATEGORY` should describe which part of the project is affected.
Valid categories are:
* Builder
* Documentation
@@ -37,19 +36,34 @@ Each change should be formatted as ```BULLET CATEGORY: DESCRIPTION```
* Packaging
* Remote API
* Runtime
* Other (please use this category sparingly)

* DESCRIPTION: a concise description of the change that is relevant to the
end-user, using the present tense. Changes should be described in terms
of how they affect the user, for example "new feature X which allows Y",
"fixed bug which caused X", "increased performance of Y".
Each change should be formatted as `BULLET DESCRIPTION`, given:

* BULLET: either `-`, `+` or `*`, to indicate a bugfix, new feature or
upgrade, respectively.

* DESCRIPTION: a concise description of the change that is relevant to the
end-user, using the present tense. Changes should be described in terms
of how they affect the user, for example "Add new feature X which allows Y",
"Fix bug which caused X", "Increase performance of Y".

EXAMPLES:

```
+ Builder: 'docker build -t FOO' applies the tag FOO to the newly built
container.
* Runtime: improve detection of kernel version
- Remote API: fix a bug in the optional unix socket transport
```markdown
## 0.3.6 (1995-12-25)

#### Builder

+ 'docker build -t FOO .' applies the tag FOO to the newly built container

#### Remote API

- Fix a bug in the optional unix socket transport

#### Runtime

* Improve detection of kernel version
```

### 3. Change the contents of the VERSION file
@@ -61,14 +75,14 @@ echo ${VERSION#v} > VERSION
### 4. Run all tests

```bash
docker run -privileged docker hack/make.sh test
make test
```

### 5. Test the docs

Make sure that your tree includes documentation for any modified or
new features, syntax or semantic changes. Instructions for building
the docs are in ``docs/README.md``
the docs are in `docs/README.md`.

### 6. Commit and create a pull request to the "release" branch

@@ -76,44 +90,32 @@ the docs are in ``docs/README.md``
git add VERSION CHANGELOG.md
git commit -m "Bump version to $VERSION"
git push origin bump_$VERSION
echo "https://github.com/dotcloud/docker/compare/release...bump_$VERSION"
```

That last command will give you the proper link to visit to ensure that you
open the PR against the "release" branch instead of accidentally against
"master" (like so many brave souls before you already have).

### 7. Get 2 other maintainers to validate the pull request

### 8. Apply tag

```bash
git tag -a $VERSION -m $VERSION bump_$VERSION
git push origin $VERSION
```

Merging the pull request to the release branch will automatically
update the documentation on the "latest" revision of the docs. You
should see the updated docs 5-10 minutes after the merge. The docs
will appear on http://docs.docker.io/. For more information about
documentation releases, see ``docs/README.md``

### 9. Go to github to merge the bump_$VERSION into release

Don't forget to push that pretty blue button to delete the leftover
branch afterwards!

### 10. Publish binaries
### 8. Publish binaries

To run this you will need access to the release credentials.
Get them from [the infrastructure maintainers](
https://github.com/dotcloud/docker/blob/master/hack/infrastructure/MAINTAINERS).

```bash
git checkout release
git fetch
git reset --hard origin/release
docker build -t docker .
docker run \
export AWS_S3_BUCKET="test.docker.io"
export AWS_ACCESS_KEY="$(cat ~/.aws/access_key)"
export AWS_SECRET_KEY="$(cat ~/.aws/secret_key)"
export GPG_PASSPHRASE=supersecretsesame
docker run \
-e AWS_S3_BUCKET=test.docker.io \
-e AWS_ACCESS_KEY=$(cat ~/.aws/access_key) \
-e AWS_SECRET_KEY=$(cat ~/.aws/secret_key) \
-e GPG_PASSPHRASE=supersecretsesame \
-e AWS_ACCESS_KEY \
-e AWS_SECRET_KEY \
-e GPG_PASSPHRASE \
-i -t -privileged \
docker \
hack/release.sh
@@ -121,9 +123,78 @@ docker run \

It will run the test suite one more time, build the binaries and packages,
and upload to the specified bucket (you should use test.docker.io for
general testing, and once everything is fine, switch to get.docker.io).
general testing, and once everything is fine, switch to get.docker.io as
noted below).

### 11. Rejoice and Evangelize!
After the binaries and packages are uploaded to test.docker.io, make sure
they get tested in both Ubuntu and Debian for any obvious installation
issues or runtime issues.

Announcing on IRC in both `#docker` and `#docker-dev` is a great way to get
help testing! An easy way to get some useful links for sharing:

```bash
echo "Ubuntu/Debian install script: curl -sLS https://test.docker.io/ | sh"
echo "Linux 64bit binary: https://test.docker.io/builds/Linux/x86_64/docker-${VERSION#v}"
echo "Darwin/OSX 64bit client binary: https://test.docker.io/builds/Darwin/x86_64/docker-${VERSION#v}"
echo "Darwin/OSX 32bit client binary: https://test.docker.io/builds/Darwin/i386/docker-${VERSION#v}"
echo "Linux 64bit tgz: https://test.docker.io/builds/Linux/x86_64/docker-${VERSION#v}.tgz"
```

Once they're tested and reasonably believed to be working, run against
get.docker.io:

```bash
docker run \
-e AWS_S3_BUCKET=get.docker.io \
-e AWS_ACCESS_KEY \
-e AWS_SECRET_KEY \
-e GPG_PASSPHRASE \
-i -t -privileged \
docker \
hack/release.sh
```

### 9. Apply tag

```bash
git tag -a $VERSION -m $VERSION bump_$VERSION
git push origin $VERSION
```

It's very important that we don't make the tag until after the official
release is uploaded to get.docker.io!

### 10. Go to github to merge the `bump_$VERSION` into release

Merging the pull request to the release branch will automatically
update the documentation on the "latest" revision of the docs. You
should see the updated docs 5-10 minutes after the merge. The docs
will appear on http://docs.docker.io/. For more information about
documentation releases, see `docs/README.md`.

Don't forget to push that pretty blue button to delete the leftover
branch afterwards!

### 11. Create a new pull request to merge release back into master

```bash
git checkout master
git fetch
git reset --hard origin/master
git merge origin/release
git checkout -b merge_release_$VERSION
echo ${VERSION#v}-dev > VERSION
git add VERSION
git commit -m "Change version to $(cat VERSION)"
git push origin merge_release_$VERSION
echo "https://github.com/dotcloud/docker/compare/master...merge_release_$VERSION"
```

Again, get two maintainers to validate, then merge, then push that pretty
blue button to delete your branch.

### 12. Rejoice and Evangelize!

Congratulations! You're done.
````
````
@@ -127,7 +127,7 @@ call('/usr/bin/rsync -aH {} {}@{}:{}'.format(DOCKER_CI_PATH, DO_IMAGE_USER, ip,

# Install Docker and Buildbot dependencies
sudo('mkdir /mnt/docker; ln -s /mnt/docker /var/lib/docker')
sudo('wget -q -O - https://get.docker.io/gpg | apt-key add -')
sudo('apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9')
sudo('echo deb https://get.docker.io/ubuntu docker main >'
' /etc/apt/sources.list.d/docker.list')
sudo('echo -e "deb http://archive.ubuntu.com/ubuntu raring main universe\n'
````

````
@@ -108,7 +108,11 @@ case "$lsb_dist" in
fi
(
set -x
$sh_c "$curl ${url}gpg | apt-key add -"
if [ "https://get.docker.io/" = "$url" ]; then
$sh_c "apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9"
else
$sh_c "$curl ${url}gpg | apt-key add -"
fi
$sh_c "echo deb ${url}ubuntu docker main > /etc/apt/sources.list.d/docker.list"
$sh_c 'sleep 3; apt-get update; apt-get install -y -q lxc-docker'
)
````

````
@@ -17,7 +17,7 @@ set -e
# DO NOT CALL THIS SCRIPT DIRECTLY.
# - The right way to call this script is to invoke "make" from
# your checkout of the Docker repository.
# the Makefile will so a "docker build -t docker ." and then
# the Makefile will do a "docker build -t docker ." and then
# "docker run hack/make.sh" in the resulting container image.
#
````

````
@@ -245,7 +245,7 @@ EOF
# Add the repository to your APT sources
echo deb $(s3_url)/ubuntu docker main > /etc/apt/sources.list.d/docker.list
# Then import the repository key
curl $(s3_url)/gpg | apt-key add -
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
# Install docker
apt-get update ; apt-get install -y lxc-docker
````
hack/travis/dco.py (new executable file, +49)

````
@@ -0,0 +1,49 @@
#!/usr/bin/env python
import re
import subprocess
import yaml

from env import commit_range

commit_format = '-%n hash: "%h"%n author: %aN <%aE>%n message: |%n%w(0,2,2)%B'

gitlog = subprocess.check_output([
'git', 'log', '--reverse',
'--format=format:'+commit_format,
'..'.join(commit_range), '--',
])

commits = yaml.load(gitlog)
if not commits:
exit(0) # what? how can we have no commits?

DCO = 'Docker-DCO-1.1-Signed-off-by:'

p = re.compile(r'^{0} ([^<]+) <([^<>@]+@[^<>]+)> \(github: (\S+)\)$'.format(re.escape(DCO)), re.MULTILINE|re.UNICODE)

failed_commits = 0

for commit in commits:
commit['stat'] = subprocess.check_output([
'git', 'log', '--format=format:', '--max-count=1',
'--name-status', commit['hash'], '--',
])
if commit['stat'] == '':
print 'Commit {0} has no actual changed content, skipping.'.format(commit['hash'])
continue

m = p.search(commit['message'])
if not m:
print 'Commit {1} does not have a properly formatted "{0}" marker.'.format(DCO, commit['hash'])
failed_commits += 1
continue # print ALL the commits that don't have a proper DCO

(name, email, github) = m.groups()

# TODO verify that "github" is the person who actually made this commit via the GitHub API

if failed_commits > 0:
exit(failed_commits)

print 'All commits have a valid "{0}" marker.'.format(DCO)
exit(0)
````
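For reference, the sign-off marker that `dco.py` enforces can also be checked locally; here is the same pattern compiled with Go's `regexp` package, with a made-up sample commit message (this is only an illustration, not part of the repository's tooling):

```go
package main

import (
	"fmt"
	"regexp"
)

// dcoRe mirrors the Docker-DCO-1.1-Signed-off-by pattern used by dco.py.
var dcoRe = regexp.MustCompile(
	`(?m)^Docker-DCO-1\.1-Signed-off-by: ([^<]+) <([^<>@]+@[^<>]+)> \(github: (\S+)\)$`)

func main() {
	msg := "archive: stream tars through a pipe\n\n" +
		"Docker-DCO-1.1-Signed-off-by: Joe Smith <joe.smith@email.com> (github: github_handle)"
	if m := dcoRe.FindStringSubmatch(msg); m != nil {
		fmt.Printf("signed off by %s <%s> (github: %s)\n", m[1], m[2], m[3])
	} else {
		fmt.Println("no valid DCO sign-off found")
	}
}
```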
hack/travis/env.py (new file, +21)

````
@@ -0,0 +1,21 @@
import os
import subprocess

if 'TRAVIS' not in os.environ:
print 'TRAVIS is not defined; this should run in TRAVIS. Sorry.'
exit(127)

if os.environ['TRAVIS_PULL_REQUEST'] != 'false':
commit_range = [os.environ['TRAVIS_BRANCH'], 'FETCH_HEAD']
else:
try:
subprocess.check_call([
'git', 'log', '-1', '--format=format:',
os.environ['TRAVIS_COMMIT_RANGE'], '--',
])
commit_range = os.environ['TRAVIS_COMMIT_RANGE'].split('...')
if len(commit_range) == 1: # if it didn't split, it must have been separated by '..' instead
commit_range = commit_range[0].split('..')
except subprocess.CalledProcessError:
print 'TRAVIS_COMMIT_RANGE is invalid. This seems to be a force push. We will just assume it must be against upstream master and compare all commits in between.'
commit_range = ['upstream/master', 'HEAD']
````
28 hack/travis/gofmt.py Executable file
@@ -0,0 +1,28 @@
#!/usr/bin/env python
import subprocess

from env import commit_range

files = subprocess.check_output([
    'git', 'diff', '--diff-filter=ACMR',
    '--name-only', '...'.join(commit_range), '--',
])

exit_status = 0

for filename in files.split('\n'):
    if filename.endswith('.go'):
        try:
            out = subprocess.check_output(['gofmt', '-s', '-l', filename])
            if out != '':
                print out,
                exit_status = 1
        except subprocess.CalledProcessError:
            exit_status = 1

if exit_status != 0:
    print 'Reformat the files listed above with "gofmt -s -w" and try again.'
    exit(exit_status)

print 'All files pass gofmt.'
exit(0)
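
The same gate can be run by hand before pushing; a small Go sketch of that workflow (illustration only — the file names are placeholders, and the real check derives them from git diff as above):

    package main

    import (
        "fmt"
        "os"
        "os/exec"
        "strings"
    )

    func main() {
        // Files to check would normally come from `git diff --name-only`; hard-coded here for illustration.
        files := []string{"network.go", "links.go"}

        exitStatus := 0
        for _, f := range files {
            if !strings.HasSuffix(f, ".go") {
                continue
            }
            // gofmt -s -l prints the file name if it needs simplification/reformatting.
            out, err := exec.Command("gofmt", "-s", "-l", f).Output()
            if err != nil || len(out) != 0 {
                fmt.Print(string(out))
                exitStatus = 1
            }
        }
        if exitStatus != 0 {
            fmt.Println(`Reformat the files listed above with "gofmt -s -w" and try again.`)
        }
        os.Exit(exitStatus)
    }
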
@@ -49,6 +49,10 @@ func TestGetVersion(t *testing.T) {
|
||||
if result := v.Get("Version"); result != expected {
|
||||
t.Errorf("Expected version %s, %s found", expected, result)
|
||||
}
|
||||
expected = "application/json"
|
||||
if result := r.HeaderMap.Get("Content-Type"); result != expected {
|
||||
t.Errorf("Expected Content-Type %s, %s found", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetInfo(t *testing.T) {
|
||||
@@ -84,6 +88,10 @@ func TestGetInfo(t *testing.T) {
|
||||
if images := i.GetInt("Images"); images != len(initialImages) {
|
||||
t.Errorf("Expected images: %d, %d found", len(initialImages), images)
|
||||
}
|
||||
expected := "application/json"
|
||||
if result := r.HeaderMap.Get("Content-Type"); result != expected {
|
||||
t.Errorf("Expected Content-Type %s, %s found", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetEvents(t *testing.T) {
|
||||
|
||||
@@ -132,6 +132,23 @@ run [ "$(cat /e)" = "blah" ]
|
||||
[][2]string{{"/x", "hello"}, {"/", "blah"}},
|
||||
},
|
||||
|
||||
// Comments, shebangs, and executability, oh my!
|
||||
{
|
||||
`
|
||||
FROM {IMAGE}
|
||||
# This is an ordinary comment.
|
||||
RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh
|
||||
RUN [ ! -x /hello.sh ]
|
||||
RUN chmod +x /hello.sh
|
||||
RUN [ -x /hello.sh ]
|
||||
RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ]
|
||||
RUN [ "$(/hello.sh)" = "hello world" ]
|
||||
`,
|
||||
nil,
|
||||
nil,
|
||||
},
|
||||
|
||||
// Environment variable
|
||||
{
|
||||
`
|
||||
from {IMAGE}
|
||||
@@ -142,6 +159,19 @@ run [ "$FOO" = "BAR" ]
|
||||
nil,
|
||||
},
|
||||
|
||||
// Environment overwriting
|
||||
{
|
||||
`
|
||||
from {IMAGE}
|
||||
env FOO BAR
|
||||
run [ "$FOO" = "BAR" ]
|
||||
env FOO BAZ
|
||||
run [ "$FOO" = "BAZ" ]
|
||||
`,
|
||||
nil,
|
||||
nil,
|
||||
},
|
||||
|
||||
{
|
||||
`
|
||||
from {IMAGE}
|
||||
@@ -391,6 +421,8 @@ func TestBuildEntrypoint(t *testing.T) {
|
||||
}
|
||||
|
||||
if img.Config.Entrypoint[0] != "/bin/echo" {
|
||||
t.Log(img.Config.Entrypoint[0])
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -425,7 +457,7 @@ func TestBuildEntrypointRunCleanup(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func checkCacheBehavior(t *testing.T, template testContextTemplate, expectHit bool) {
|
||||
func checkCacheBehavior(t *testing.T, template testContextTemplate, expectHit bool) (imageId string) {
|
||||
eng := NewTestEngine(t)
|
||||
defer nuke(mkRuntimeFromEngine(eng, t))
|
||||
|
||||
@@ -434,20 +466,36 @@ func checkCacheBehavior(t *testing.T, template testContextTemplate, expectHit bo
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
imageId := img.ID
|
||||
imageId = img.ID
|
||||
|
||||
img = nil
|
||||
img, err = buildImage(template, t, eng, expectHit)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
hit := imageId == img.ID
|
||||
if hit != expectHit {
|
||||
t.Logf("Cache misbehavior, got hit=%t, expected hit=%t: (first: %s, second %s)",
|
||||
hit, expectHit, imageId, img.ID)
|
||||
t.Fail()
|
||||
if hit := imageId == img.ID; hit != expectHit {
|
||||
t.Fatalf("Cache misbehavior, got hit=%t, expected hit=%t: (first: %s, second %s)", hit, expectHit, imageId, img.ID)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func checkCacheBehaviorFromEngime(t *testing.T, template testContextTemplate, expectHit bool, eng *engine.Engine) (imageId string) {
|
||||
img, err := buildImage(template, t, eng, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
imageId = img.ID
|
||||
|
||||
img, err = buildImage(template, t, eng, expectHit)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if hit := imageId == img.ID; hit != expectHit {
|
||||
t.Fatalf("Cache misbehavior, got hit=%t, expected hit=%t: (first: %s, second %s)", hit, expectHit, imageId, img.ID)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func TestBuildImageWithCache(t *testing.T) {
|
||||
@@ -474,11 +522,61 @@ func TestBuildADDLocalFileWithCache(t *testing.T) {
|
||||
maintainer dockerio
|
||||
run echo "first"
|
||||
add foo /usr/lib/bla/bar
|
||||
run [ "$(cat /usr/lib/bla/bar)" = "hello" ]
|
||||
run echo "second"
|
||||
add . /src/
|
||||
run [ "$(cat /src/foo)" = "hello" ]
|
||||
`,
|
||||
[][2]string{{"foo", "hello"}},
|
||||
[][2]string{
|
||||
{"foo", "hello"},
|
||||
},
|
||||
nil}
|
||||
checkCacheBehavior(t, template, true)
|
||||
eng := NewTestEngine(t)
|
||||
defer nuke(mkRuntimeFromEngine(eng, t))
|
||||
|
||||
id1 := checkCacheBehaviorFromEngime(t, template, true, eng)
|
||||
template.files = append(template.files, [2]string{"bar", "hello2"})
|
||||
id2 := checkCacheBehaviorFromEngime(t, template, true, eng)
|
||||
if id1 == id2 {
|
||||
t.Fatal("The cache should have been invalided but hasn't.")
|
||||
}
|
||||
id3 := checkCacheBehaviorFromEngime(t, template, true, eng)
|
||||
if id2 != id3 {
|
||||
t.Fatal("The cache should have been used but hasn't.")
|
||||
}
|
||||
template.files[1][1] = "hello3"
|
||||
id4 := checkCacheBehaviorFromEngime(t, template, true, eng)
|
||||
if id3 == id4 {
|
||||
t.Fatal("The cache should have been invalided but hasn't.")
|
||||
}
|
||||
template.dockerfile += `
|
||||
add ./bar /src2/
|
||||
run ls /src2/bar
|
||||
`
|
||||
id5 := checkCacheBehaviorFromEngime(t, template, true, eng)
|
||||
if id4 == id5 {
|
||||
t.Fatal("The cache should have been invalided but hasn't.")
|
||||
}
|
||||
template.files[1][1] = "hello4"
|
||||
id6 := checkCacheBehaviorFromEngime(t, template, true, eng)
|
||||
if id5 == id6 {
|
||||
t.Fatal("The cache should have been invalided but hasn't.")
|
||||
}
|
||||
|
||||
template.dockerfile += `
|
||||
add bar /src2/bar2
|
||||
add /bar /src2/bar3
|
||||
run ls /src2/bar2 /src2/bar3
|
||||
`
|
||||
id7 := checkCacheBehaviorFromEngime(t, template, true, eng)
|
||||
if id6 == id7 {
|
||||
t.Fatal("The cache should have been invalided but hasn't.")
|
||||
}
|
||||
template.files[1][1] = "hello5"
|
||||
id8 := checkCacheBehaviorFromEngime(t, template, true, eng)
|
||||
if id7 == id8 {
|
||||
t.Fatal("The cache should have been invalided but hasn't.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildADDLocalFileWithoutCache(t *testing.T) {
|
||||
@@ -494,6 +592,26 @@ func TestBuildADDLocalFileWithoutCache(t *testing.T) {
|
||||
checkCacheBehavior(t, template, false)
|
||||
}
|
||||
|
||||
func TestBuildADDCurrentDirectoryWithCache(t *testing.T) {
|
||||
template := testContextTemplate{`
|
||||
from {IMAGE}
|
||||
maintainer dockerio
|
||||
add . /usr/lib/bla
|
||||
`,
|
||||
nil, nil}
|
||||
checkCacheBehavior(t, template, true)
|
||||
}
|
||||
|
||||
func TestBuildADDCurrentDirectoryWithoutCache(t *testing.T) {
|
||||
template := testContextTemplate{`
|
||||
from {IMAGE}
|
||||
maintainer dockerio
|
||||
add . /usr/lib/bla
|
||||
`,
|
||||
nil, nil}
|
||||
checkCacheBehavior(t, template, false)
|
||||
}
|
||||
|
||||
func TestBuildADDRemoteFileWithCache(t *testing.T) {
|
||||
template := testContextTemplate{`
|
||||
from {IMAGE}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"github.com/dotcloud/docker/iptables"
|
||||
"github.com/dotcloud/docker/pkg/iptables"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
2 links.go
@@ -2,7 +2,7 @@ package docker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/iptables"
|
||||
"github.com/dotcloud/docker/pkg/iptables"
|
||||
"path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@@ -4,6 +4,10 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
func GetMounts() ([]*MountInfo, error) {
|
||||
return parseMountTable()
|
||||
}
|
||||
|
||||
// Looks at /proc/self/mountinfo to determine of the specified
|
||||
// mountpoint has been mounted
|
||||
func Mounted(mountpoint string) (bool, error) {
|
||||
@@ -14,7 +18,7 @@ func Mounted(mountpoint string) (bool, error) {
|
||||
|
||||
// Search the table for the mountpoint
|
||||
for _, e := range entries {
|
||||
if e.mountpoint == mountpoint {
|
||||
if e.Mountpoint == mountpoint {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,22 +5,35 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// We only parse upto the mountinfo because that is all we
|
||||
// care about right now
|
||||
mountinfoFormat = "%d %d %d:%d %s %s %s"
|
||||
/* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
|
||||
(1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
|
||||
|
||||
(1) mount ID: unique identifier of the mount (may be reused after umount)
|
||||
(2) parent ID: ID of parent (or of self for the top of the mount tree)
|
||||
(3) major:minor: value of st_dev for files on filesystem
|
||||
(4) root: root of the mount within the filesystem
|
||||
(5) mount point: mount point relative to the process's root
|
||||
(6) mount options: per mount options
|
||||
(7) optional fields: zero or more fields of the form "tag[:value]"
|
||||
(8) separator: marks the end of the optional fields
|
||||
(9) filesystem type: name of filesystem of the form "type[.subtype]"
|
||||
(10) mount source: filesystem specific information or "none"
|
||||
(11) super options: per super block options*/
|
||||
mountinfoFormat = "%d %d %d:%d %s %s %s "
|
||||
)
|
||||
|
||||
// Represents one line from /proc/self/mountinfo
|
||||
type procEntry struct {
|
||||
id, parent, major, minor int
|
||||
source, mountpoint, opts string
|
||||
type MountInfo struct {
|
||||
Id, Parent, Major, Minor int
|
||||
Root, Mountpoint, Opts string
|
||||
Fstype, Source, VfsOpts string
|
||||
}
|
||||
|
||||
// Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts
|
||||
func parseMountTable() ([]*procEntry, error) {
|
||||
func parseMountTable() ([]*MountInfo, error) {
|
||||
f, err := os.Open("/proc/self/mountinfo")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -30,10 +43,10 @@ func parseMountTable() ([]*procEntry, error) {
|
||||
return parseInfoFile(f)
|
||||
}
|
||||
|
||||
func parseInfoFile(r io.Reader) ([]*procEntry, error) {
|
||||
func parseInfoFile(r io.Reader) ([]*MountInfo, error) {
|
||||
var (
|
||||
s = bufio.NewScanner(r)
|
||||
out = []*procEntry{}
|
||||
out = []*MountInfo{}
|
||||
)
|
||||
|
||||
for s.Scan() {
|
||||
@@ -42,14 +55,24 @@ func parseInfoFile(r io.Reader) ([]*procEntry, error) {
|
||||
}
|
||||
|
||||
var (
|
||||
p = &procEntry{}
|
||||
p = &MountInfo{}
|
||||
text = s.Text()
|
||||
)
|
||||
|
||||
if _, err := fmt.Sscanf(text, mountinfoFormat,
|
||||
&p.id, &p.parent, &p.major, &p.minor,
|
||||
&p.source, &p.mountpoint, &p.opts); err != nil {
|
||||
&p.Id, &p.Parent, &p.Major, &p.Minor,
|
||||
&p.Root, &p.Mountpoint, &p.Opts); err != nil {
|
||||
return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
|
||||
}
|
||||
// Safe as mountinfo encodes mountpoints with spaces as \040.
|
||||
index := strings.Index(text, " - ")
|
||||
postSeparatorFields := strings.Fields(text[index+3:])
|
||||
if len(postSeparatorFields) != 3 {
|
||||
return nil, fmt.Errorf("Error did not find 3 fields post '-' in '%s'", text)
|
||||
}
|
||||
p.Fstype = postSeparatorFields[0]
|
||||
p.Source = postSeparatorFields[1]
|
||||
p.VfsOpts = postSeparatorFields[2]
|
||||
out = append(out, p)
|
||||
}
|
||||
return out, nil
|
||||
|
||||
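
To make the two-stage parse concrete, here is a self-contained Go sketch (illustration only, reusing the sample line from the comment above): fmt.Sscanf picks up fields (1)-(7), then the text after the " - " separator is split into the fstype, source and super options.

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Sample line quoted in the mountinfo comment block.
        text := "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue"

        var (
            id, parent, major, minor int
            root, mountpoint, opts   string
        )
        // Fields (1)-(7); each %s stops at the next space.
        if _, err := fmt.Sscanf(text, "%d %d %d:%d %s %s %s",
            &id, &parent, &major, &minor, &root, &mountpoint, &opts); err != nil {
            panic(err)
        }

        // Everything after " - " is fstype, mount source and per-superblock options.
        post := strings.Fields(text[strings.Index(text, " - ")+3:])

        fmt.Println(id, parent, major, minor, root, mountpoint, opts) // 36 35 98 0 /mnt1 /mnt2 rw,noatime
        fmt.Println(post[0], post[1], post[2])                        // ext3 /dev/root rw,errors=continue
    }
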
17 network.go
@@ -4,7 +4,7 @@ import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/iptables"
|
||||
"github.com/dotcloud/docker/pkg/iptables"
|
||||
"github.com/dotcloud/docker/pkg/netlink"
|
||||
"github.com/dotcloud/docker/proxy"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
@@ -248,12 +248,12 @@ type PortMapper struct {
|
||||
}
|
||||
|
||||
func (mapper *PortMapper) Map(ip net.IP, port int, backendAddr net.Addr) error {
|
||||
mapKey := (&net.TCPAddr{Port: port, IP: ip}).String()
|
||||
if _, exists := mapper.tcpProxies[mapKey]; exists {
|
||||
return fmt.Errorf("Port %s is already in use", mapKey)
|
||||
}
|
||||
|
||||
if _, isTCP := backendAddr.(*net.TCPAddr); isTCP {
|
||||
mapKey := (&net.TCPAddr{Port: port, IP: ip}).String()
|
||||
if _, exists := mapper.tcpProxies[mapKey]; exists {
|
||||
return fmt.Errorf("TCP Port %s is already in use", mapKey)
|
||||
}
|
||||
backendPort := backendAddr.(*net.TCPAddr).Port
|
||||
backendIP := backendAddr.(*net.TCPAddr).IP
|
||||
if mapper.iptables != nil {
|
||||
@@ -270,6 +270,10 @@ func (mapper *PortMapper) Map(ip net.IP, port int, backendAddr net.Addr) error {
|
||||
mapper.tcpProxies[mapKey] = proxy
|
||||
go proxy.Run()
|
||||
} else {
|
||||
mapKey := (&net.UDPAddr{Port: port, IP: ip}).String()
|
||||
if _, exists := mapper.udpProxies[mapKey]; exists {
|
||||
return fmt.Errorf("UDP: Port %s is already in use", mapKey)
|
||||
}
|
||||
backendPort := backendAddr.(*net.UDPAddr).Port
|
||||
backendIP := backendAddr.(*net.UDPAddr).IP
|
||||
if mapper.iptables != nil {
|
||||
@@ -290,8 +294,8 @@ func (mapper *PortMapper) Map(ip net.IP, port int, backendAddr net.Addr) error {
|
||||
}
|
||||
|
||||
func (mapper *PortMapper) Unmap(ip net.IP, port int, proto string) error {
|
||||
mapKey := (&net.TCPAddr{Port: port, IP: ip}).String()
|
||||
if proto == "tcp" {
|
||||
mapKey := (&net.TCPAddr{Port: port, IP: ip}).String()
|
||||
backendAddr, ok := mapper.tcpMapping[mapKey]
|
||||
if !ok {
|
||||
return fmt.Errorf("Port tcp/%s is not mapped", mapKey)
|
||||
@@ -307,6 +311,7 @@ func (mapper *PortMapper) Unmap(ip net.IP, port int, proto string) error {
|
||||
}
|
||||
delete(mapper.tcpMapping, mapKey)
|
||||
} else {
|
||||
mapKey := (&net.UDPAddr{Port: port, IP: ip}).String()
|
||||
backendAddr, ok := mapper.udpMapping[mapKey]
|
||||
if !ok {
|
||||
return fmt.Errorf("Port udp/%s is not mapped", mapKey)
|
||||
|
||||
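
Worth noting for the hunk above: a TCP and a UDP address on the same ip:port stringify identically, so the mapper keeps separate tcpProxies and udpProxies maps and now builds the key inside each protocol branch. A tiny Go sketch (illustration only, not code from the diff):

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        ip := net.ParseIP("127.0.0.1")
        tcpKey := (&net.TCPAddr{IP: ip, Port: 8080}).String()
        udpKey := (&net.UDPAddr{IP: ip, Port: 8080}).String()
        fmt.Println(tcpKey, udpKey, tcpKey == udpKey) // 127.0.0.1:8080 127.0.0.1:8080 true

        // Hence one map per protocol, keyed by the ip:port string.
        tcpProxies := map[string]struct{}{tcpKey: {}}
        udpProxies := map[string]struct{}{udpKey: {}}
        fmt.Println(len(tcpProxies), len(udpProxies)) // 1 1
    }
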
@@ -1,7 +1,7 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"github.com/dotcloud/docker/iptables"
|
||||
"github.com/dotcloud/docker/pkg/iptables"
|
||||
"github.com/dotcloud/docker/proxy"
|
||||
"net"
|
||||
"testing"
|
||||
@@ -340,6 +340,7 @@ func NewStubProxy(frontendAddr, backendAddr net.Addr) (proxy.Proxy, error) {
|
||||
}
|
||||
|
||||
func TestPortMapper(t *testing.T) {
|
||||
// FIXME: is this iptables chain still used anywhere?
|
||||
var chain *iptables.Chain
|
||||
mapper := &PortMapper{
|
||||
tcpMapping: make(map[string]*net.TCPAddr),
|
||||
|
||||
1 pkg/iptables/MAINTAINERS Normal file
@@ -0,0 +1 @@
|
||||
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
|
||||
@@ -40,7 +40,7 @@ var (
|
||||
// Niels Bohr is the father of quantum theory. http://en.wikipedia.org/wiki/Niels_Bohr.
|
||||
// Nikola Tesla invented the AC electric system and every gaget ever used by a James Bond villain. http://en.wikipedia.org/wiki/Nikola_Tesla
|
||||
// Pierre de Fermat pioneered several aspects of modern mathematics. http://en.wikipedia.org/wiki/Pierre_de_Fermat
|
||||
// Richard Feynmann was a key contributor to quantum mechanics and particle physics. http://en.wikipedia.org/wiki/Richard_Feynman
|
||||
// Richard Feynman was a key contributor to quantum mechanics and particle physics. http://en.wikipedia.org/wiki/Richard_Feynman
|
||||
// Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. http://en.wikipedia.org/wiki/Rob_Pike
|
||||
// Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. http://en.wikipedia.org/wiki/Stephen_Hawking
|
||||
// Steve Wozniak invented the Apple I and Apple II. http://en.wikipedia.org/wiki/Steve_Wozniak
|
||||
@@ -49,7 +49,7 @@ var (
|
||||
// http://en.wikipedia.org/wiki/John_Bardeen
|
||||
// http://en.wikipedia.org/wiki/Walter_Houser_Brattain
|
||||
// http://en.wikipedia.org/wiki/William_Shockley
|
||||
right = [...]string{"lovelace", "franklin", "tesla", "einstein", "bohr", "davinci", "pasteur", "nobel", "curie", "darwin", "turing", "ritchie", "torvalds", "pike", "thompson", "wozniak", "galileo", "euclide", "newton", "fermat", "archimede", "poincare", "heisenberg", "feynmann", "hawkings", "fermi", "pare", "mccarthy", "engelbart", "babbage", "albattani", "ptolemy", "bell", "wright", "lumiere", "morse", "mclean", "brown", "bardeen", "brattain", "shockley"}
|
||||
right = [...]string{"lovelace", "franklin", "tesla", "einstein", "bohr", "davinci", "pasteur", "nobel", "curie", "darwin", "turing", "ritchie", "torvalds", "pike", "thompson", "wozniak", "galileo", "euclid", "newton", "fermat", "archimedes", "poincare", "heisenberg", "feynman", "hawking", "fermi", "pare", "mccarthy", "engelbart", "babbage", "albattani", "ptolemy", "bell", "wright", "lumiere", "morse", "mclean", "brown", "bardeen", "brattain", "shockley"}
|
||||
)
|
||||
|
||||
func GenerateRandomName(checker NameChecker) (string, error) {
|
||||
|
||||
@@ -234,7 +234,7 @@ func (s *NetlinkSocket) Send(request *NetlinkRequest) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *NetlinkSocket) Recieve() ([]syscall.NetlinkMessage, error) {
|
||||
func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) {
|
||||
rb := make([]byte, syscall.Getpagesize())
|
||||
nr, _, err := syscall.Recvfrom(s.fd, rb, 0)
|
||||
if err != nil {
|
||||
@@ -269,7 +269,7 @@ func (s *NetlinkSocket) HandleAck(seq uint32) error {
|
||||
|
||||
done:
|
||||
for {
|
||||
msgs, err := s.Recieve()
|
||||
msgs, err := s.Receive()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -500,7 +500,7 @@ func NetworkGetRoutes() ([]*net.IPNet, error) {
|
||||
|
||||
done:
|
||||
for {
|
||||
msgs, err := s.Recieve()
|
||||
msgs, err := s.Receive()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"os"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
func CopyFile(dstFile, srcFile *os.File) error {
|
||||
|
||||
@@ -25,8 +25,8 @@ btrfs_reflink(int fd_out, int fd_in)
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"os"
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
|
||||
@@ -25,11 +25,11 @@ var (
|
||||
ErrLoginRequired = errors.New("Authentication is required.")
|
||||
)
|
||||
|
||||
func pingRegistryEndpoint(endpoint string) error {
|
||||
func pingRegistryEndpoint(endpoint string) (bool, error) {
|
||||
if endpoint == auth.IndexServerAddress() {
|
||||
// Skip the check, we now this one is valid
|
||||
// (and we never want to fallback to http in case of error)
|
||||
return nil
|
||||
return false, nil
|
||||
}
|
||||
httpDial := func(proto string, addr string) (net.Conn, error) {
|
||||
// Set the connect timeout to 5 seconds
|
||||
@@ -45,14 +45,26 @@ func pingRegistryEndpoint(endpoint string) error {
|
||||
client := &http.Client{Transport: httpTransport}
|
||||
resp, err := client.Get(endpoint + "_ping")
|
||||
if err != nil {
|
||||
return err
|
||||
return false, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.Header.Get("X-Docker-Registry-Version") == "" {
|
||||
return errors.New("This does not look like a Registry server (\"X-Docker-Registry-Version\" header not found in the response)")
|
||||
return false, errors.New("This does not look like a Registry server (\"X-Docker-Registry-Version\" header not found in the response)")
|
||||
}
|
||||
return nil
|
||||
|
||||
standalone := resp.Header.Get("X-Docker-Registry-Standalone")
|
||||
utils.Debugf("Registry standalone header: '%s'", standalone)
|
||||
// If the header is absent, we assume true for compatibility with earlier
|
||||
// versions of the registry
|
||||
if standalone == "" {
|
||||
return true, nil
|
||||
// Accepted values are "true" (case-insensitive) and "1".
|
||||
} else if strings.EqualFold(standalone, "true") || standalone == "1" {
|
||||
return true, nil
|
||||
}
|
||||
// Otherwise, not standalone
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func validateRepositoryName(repositoryName string) error {
|
||||
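
Restating the header handling above as a small standalone Go sketch (the isStandalone helper is invented for illustration; the logic follows the hunk): an absent X-Docker-Registry-Standalone header counts as standalone, as do "true" (any case) and "1"; everything else does not.

    package main

    import (
        "fmt"
        "strings"
    )

    // isStandalone mirrors the X-Docker-Registry-Standalone handling in pingRegistryEndpoint.
    func isStandalone(header string) bool {
        if header == "" {
            // Absent header: assume standalone for compatibility with earlier registries.
            return true
        }
        return strings.EqualFold(header, "true") || header == "1"
    }

    func main() {
        for _, h := range []string{"", "true", "TRUE", "1", "false", "0"} {
            fmt.Printf("%q -> %v\n", h, isStandalone(h))
        }
    }
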
@@ -122,16 +134,16 @@ func ExpandAndVerifyRegistryUrl(hostname string) (string, error) {
|
||||
// there is no path given. Expand with default path
|
||||
hostname = hostname + "/v1/"
|
||||
}
|
||||
if err := pingRegistryEndpoint(hostname); err != nil {
|
||||
if _, err := pingRegistryEndpoint(hostname); err != nil {
|
||||
return "", errors.New("Invalid Registry endpoint: " + err.Error())
|
||||
}
|
||||
return hostname, nil
|
||||
}
|
||||
endpoint := fmt.Sprintf("https://%s/v1/", hostname)
|
||||
if err := pingRegistryEndpoint(endpoint); err != nil {
|
||||
if _, err := pingRegistryEndpoint(endpoint); err != nil {
|
||||
utils.Debugf("Registry %s does not work (%s), falling back to http", endpoint, err)
|
||||
endpoint = fmt.Sprintf("http://%s/v1/", hostname)
|
||||
if err = pingRegistryEndpoint(endpoint); err != nil {
|
||||
if _, err = pingRegistryEndpoint(endpoint); err != nil {
|
||||
//TODO: triggering highland build can be done there without "failing"
|
||||
return "", errors.New("Invalid Registry endpoint: " + err.Error())
|
||||
}
|
||||
@@ -682,12 +694,18 @@ func NewRegistry(authConfig *auth.AuthConfig, factory *utils.HTTPRequestFactory,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If we're working with a private registry over HTTPS, send Basic Auth headers
|
||||
// If we're working with a standalone private registry over HTTPS, send Basic Auth headers
|
||||
// alongside our requests.
|
||||
if indexEndpoint != auth.IndexServerAddress() && strings.HasPrefix(indexEndpoint, "https://") {
|
||||
utils.Debugf("Endpoint %s is eligible for private registry auth. Enabling decorator.", indexEndpoint)
|
||||
dec := utils.NewHTTPAuthDecorator(authConfig.Username, authConfig.Password)
|
||||
factory.AddDecorator(dec)
|
||||
standalone, err := pingRegistryEndpoint(indexEndpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if standalone {
|
||||
utils.Debugf("Endpoint %s is eligible for private registry auth. Enabling decorator.", indexEndpoint)
|
||||
dec := utils.NewHTTPAuthDecorator(authConfig.Username, authConfig.Password)
|
||||
factory.AddDecorator(dec)
|
||||
}
|
||||
}
|
||||
|
||||
r.reqFactory = factory
|
||||
|
||||
@@ -23,10 +23,11 @@ func spawnTestRegistry(t *testing.T) *Registry {
|
||||
}
|
||||
|
||||
func TestPingRegistryEndpoint(t *testing.T) {
|
||||
err := pingRegistryEndpoint(makeURL("/v1/"))
|
||||
standalone, err := pingRegistryEndpoint(makeURL("/v1/"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assertEqual(t, standalone, true, "Expected standalone to be true (default)")
|
||||
}
|
||||
|
||||
func TestGetRemoteHistory(t *testing.T) {
|
||||
|
||||
14 runtime.go
@@ -4,11 +4,12 @@ import (
|
||||
"container/list"
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/archive"
|
||||
"github.com/dotcloud/docker/pkg/graphdb"
|
||||
"github.com/dotcloud/docker/cgroups"
|
||||
"github.com/dotcloud/docker/graphdriver"
|
||||
"github.com/dotcloud/docker/graphdriver/aufs"
|
||||
_ "github.com/dotcloud/docker/graphdriver/devmapper"
|
||||
_ "github.com/dotcloud/docker/graphdriver/vfs"
|
||||
"github.com/dotcloud/docker/pkg/graphdb"
|
||||
"github.com/dotcloud/docker/utils"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -332,7 +333,7 @@ func (runtime *Runtime) restore() error {
|
||||
|
||||
// FIXME: comment please!
|
||||
func (runtime *Runtime) UpdateCapabilities(quiet bool) {
|
||||
if cgroupMemoryMountpoint, err := utils.FindCgroupMountpoint("memory"); err != nil {
|
||||
if cgroupMemoryMountpoint, err := cgroups.FindCgroupMountpoint("memory"); err != nil {
|
||||
if !quiet {
|
||||
log.Printf("WARNING: %s\n", err)
|
||||
}
|
||||
@@ -582,11 +583,6 @@ func (runtime *Runtime) Commit(container *Container, repository, tag, comment, a
|
||||
return img, nil
|
||||
}
|
||||
|
||||
// FIXME: this is deprecated by the getFullName *function*
|
||||
func (runtime *Runtime) getFullName(name string) (string, error) {
|
||||
return getFullName(name)
|
||||
}
|
||||
|
||||
func getFullName(name string) (string, error) {
|
||||
if name == "" {
|
||||
return "", fmt.Errorf("Container name cannot be empty")
|
||||
@@ -598,7 +594,7 @@ func getFullName(name string) (string, error) {
|
||||
}
|
||||
|
||||
func (runtime *Runtime) GetByName(name string) (*Container, error) {
|
||||
fullName, err := runtime.getFullName(name)
|
||||
fullName, err := getFullName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -614,7 +610,7 @@ func (runtime *Runtime) GetByName(name string) (*Container, error) {
|
||||
}
|
||||
|
||||
func (runtime *Runtime) Children(name string) (map[string]*Container, error) {
|
||||
name, err := runtime.getFullName(name)
|
||||
name, err := getFullName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
33 server.go
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"github.com/dotcloud/docker/archive"
|
||||
"github.com/dotcloud/docker/auth"
|
||||
"github.com/dotcloud/docker/cgroups"
|
||||
"github.com/dotcloud/docker/engine"
|
||||
"github.com/dotcloud/docker/pkg/graphdb"
|
||||
"github.com/dotcloud/docker/registry"
|
||||
@@ -21,7 +22,6 @@ import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -702,7 +702,7 @@ func (srv *Server) ContainerTop(name, psArgs string) (*APITop, error) {
|
||||
if !container.State.IsRunning() {
|
||||
return nil, fmt.Errorf("Container %s is not running", name)
|
||||
}
|
||||
pids, err := utils.GetPidsForContainer(container.ID)
|
||||
pids, err := cgroups.GetPidsForContainer(container.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1437,7 +1437,7 @@ func (srv *Server) ContainerDestroy(name string, removeVolume, removeLink bool)
|
||||
if container == nil {
|
||||
return fmt.Errorf("No such link: %s", name)
|
||||
}
|
||||
name, err := srv.runtime.getFullName(name)
|
||||
name, err := getFullName(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1696,30 +1696,32 @@ func (srv *Server) ImageGetCached(imgID string, config *Config) (*Image, error)
|
||||
}
|
||||
|
||||
// Store the tree in a map of map (map[parentId][childId])
|
||||
imageMap := make(map[string][]string)
|
||||
imageMap := make(map[string]map[string]struct{})
|
||||
for _, img := range images {
|
||||
imageMap[img.Parent] = append(imageMap[img.Parent], img.ID)
|
||||
if _, exists := imageMap[img.Parent]; !exists {
|
||||
imageMap[img.Parent] = make(map[string]struct{})
|
||||
}
|
||||
imageMap[img.Parent][img.ID] = struct{}{}
|
||||
}
|
||||
sort.Strings(imageMap[imgID])
|
||||
|
||||
// Loop on the children of the given image and check the config
|
||||
for _, elem := range imageMap[imgID] {
|
||||
var match *Image
|
||||
for elem := range imageMap[imgID] {
|
||||
img, err := srv.runtime.graph.Get(elem)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if CompareConfig(&img.ContainerConfig, config) {
|
||||
return img, nil
|
||||
if match == nil || match.Created.Before(img.Created) {
|
||||
match = img
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
return match, nil
|
||||
}
|
||||
|
||||
func (srv *Server) RegisterLinks(name string, hostConfig *HostConfig) error {
|
||||
func (srv *Server) RegisterLinks(container *Container, hostConfig *HostConfig) error {
|
||||
runtime := srv.runtime
|
||||
container := runtime.Get(name)
|
||||
if container == nil {
|
||||
return fmt.Errorf("No such container: %s", name)
|
||||
}
|
||||
|
||||
if hostConfig != nil && hostConfig.Links != nil {
|
||||
for _, l := range hostConfig.Links {
|
||||
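
The change above from map[string][]string to map[string]map[string]struct{} is the usual Go set idiom, which records each child ID at most once per parent. A minimal sketch (sample IDs invented):

    package main

    import "fmt"

    func main() {
        imageMap := make(map[string]map[string]struct{})

        add := func(parent, child string) {
            if _, exists := imageMap[parent]; !exists {
                imageMap[parent] = make(map[string]struct{})
            }
            imageMap[parent][child] = struct{}{}
        }

        // Registering the same child twice is harmless with a set.
        add("parent-1", "child-a")
        add("parent-1", "child-a")
        add("parent-1", "child-b")

        for child := range imageMap["parent-1"] {
            fmt.Println(child) // child-a and child-b, once each, in no particular order
        }
    }
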
@@ -1792,8 +1794,7 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
|
||||
}
|
||||
}
|
||||
// Register any links from the host config before starting the container
|
||||
// FIXME: we could just pass the container here, no need to lookup by name again.
|
||||
if err := srv.RegisterLinks(name, &hostConfig); err != nil {
|
||||
if err := srv.RegisterLinks(container, &hostConfig); err != nil {
|
||||
job.Error(err)
|
||||
return engine.StatusErr
|
||||
}
|
||||
|
||||
1 utils.go
@@ -322,7 +322,6 @@ func migratePortMappings(config *Config, hostConfig *HostConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
// Links come in the format of
|
||||
// name:alias
|
||||
func parseLink(rawLink string) (map[string]string, error) {
|
||||
|
||||
@@ -15,16 +15,34 @@ import (
|
||||
|
||||
type TarSum struct {
|
||||
io.Reader
|
||||
tarR *tar.Reader
|
||||
tarW *tar.Writer
|
||||
gz *gzip.Writer
|
||||
bufTar *bytes.Buffer
|
||||
bufGz *bytes.Buffer
|
||||
h hash.Hash
|
||||
sums map[string]string
|
||||
currentFile string
|
||||
finished bool
|
||||
first bool
|
||||
tarR *tar.Reader
|
||||
tarW *tar.Writer
|
||||
gz writeCloseFlusher
|
||||
bufTar *bytes.Buffer
|
||||
bufGz *bytes.Buffer
|
||||
h hash.Hash
|
||||
sums map[string]string
|
||||
currentFile string
|
||||
finished bool
|
||||
first bool
|
||||
DisableCompression bool
|
||||
}
|
||||
|
||||
type writeCloseFlusher interface {
|
||||
io.WriteCloser
|
||||
Flush() error
|
||||
}
|
||||
|
||||
type nopCloseFlusher struct {
|
||||
io.Writer
|
||||
}
|
||||
|
||||
func (n *nopCloseFlusher) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *nopCloseFlusher) Flush() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ts *TarSum) encodeHeader(h *tar.Header) error {
|
||||
@@ -57,7 +75,11 @@ func (ts *TarSum) Read(buf []byte) (int, error) {
|
||||
ts.bufGz = bytes.NewBuffer([]byte{})
|
||||
ts.tarR = tar.NewReader(ts.Reader)
|
||||
ts.tarW = tar.NewWriter(ts.bufTar)
|
||||
ts.gz = gzip.NewWriter(ts.bufGz)
|
||||
if !ts.DisableCompression {
|
||||
ts.gz = gzip.NewWriter(ts.bufGz)
|
||||
} else {
|
||||
ts.gz = &nopCloseFlusher{Writer: ts.bufGz}
|
||||
}
|
||||
ts.h = sha256.New()
|
||||
ts.h.Reset()
|
||||
ts.first = true
|
||||
|
||||
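
For context, the writeCloseFlusher interface above lets TarSum use either a real gzip writer or a pass-through when DisableCompression is set. A small Go sketch of the same pattern in isolation (newWriter and the buffer plumbing are invented for illustration):

    package main

    import (
        "bytes"
        "compress/gzip"
        "fmt"
        "io"
    )

    // Same shape as TarSum's writeCloseFlusher: something that can be written to, flushed and closed.
    type writeCloseFlusher interface {
        io.WriteCloser
        Flush() error
    }

    // No-op implementation used when compression is disabled.
    type nopCloseFlusher struct{ io.Writer }

    func (n *nopCloseFlusher) Close() error { return nil }
    func (n *nopCloseFlusher) Flush() error { return nil }

    func newWriter(dst *bytes.Buffer, compress bool) writeCloseFlusher {
        if compress {
            return gzip.NewWriter(dst) // *gzip.Writer already satisfies the interface
        }
        return &nopCloseFlusher{Writer: dst}
    }

    func main() {
        var buf bytes.Buffer
        w := newWriter(&buf, false) // flip to true to get gzip-compressed output
        w.Write([]byte("hello"))
        w.Flush()
        w.Close()
        fmt.Println(buf.String()) // "hello" when compression is off
    }
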
@@ -6,7 +6,6 @@ import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"index/suffixarray"
|
||||
"io"
|
||||
@@ -46,14 +45,12 @@ func Go(f func() error) chan error {
|
||||
}
|
||||
|
||||
// Request a given URL and return an io.Reader
|
||||
func Download(url string) (*http.Response, error) {
|
||||
var resp *http.Response
|
||||
var err error
|
||||
func Download(url string) (resp *http.Response, err error) {
|
||||
if resp, err = http.Get(url); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode >= 400 {
|
||||
return nil, errors.New("Got HTTP status code >= 400: " + resp.Status)
|
||||
return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
@@ -583,28 +580,6 @@ func CompareKernelVersion(a, b *KernelVersionInfo) int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func FindCgroupMountpoint(cgroupType string) (string, error) {
|
||||
output, err := ioutil.ReadFile("/proc/mounts")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// /proc/mounts has 6 fields per line, one mount per line, e.g.
|
||||
// cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0
|
||||
for _, line := range strings.Split(string(output), "\n") {
|
||||
parts := strings.Split(line, " ")
|
||||
if len(parts) == 6 && parts[2] == "cgroup" {
|
||||
for _, opt := range strings.Split(parts[3], ",") {
|
||||
if opt == cgroupType {
|
||||
return parts[1], nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType)
|
||||
}
|
||||
|
||||
func GetKernelVersion() (*KernelVersionInfo, error) {
|
||||
var (
|
||||
err error
|
||||
@@ -803,7 +778,7 @@ func ParseHost(defaultHost string, defaultPort int, defaultUnix, addr string) (s
|
||||
host string
|
||||
port int
|
||||
)
|
||||
|
||||
addr = strings.TrimSpace(addr)
|
||||
switch {
|
||||
case strings.HasPrefix(addr, "unix://"):
|
||||
proto = "unix"
|
||||
@@ -814,6 +789,9 @@ func ParseHost(defaultHost string, defaultPort int, defaultUnix, addr string) (s
|
||||
case strings.HasPrefix(addr, "tcp://"):
|
||||
proto = "tcp"
|
||||
addr = strings.TrimPrefix(addr, "tcp://")
|
||||
case addr == "":
|
||||
proto = "unix"
|
||||
addr = defaultUnix
|
||||
default:
|
||||
if strings.Contains(addr, "://") {
|
||||
return "", fmt.Errorf("Invalid bind address protocol: %s", addr)
|
||||
@@ -1157,59 +1135,3 @@ func CopyFile(src, dst string) (int64, error) {
|
||||
defer df.Close()
|
||||
return io.Copy(df, sf)
|
||||
}
|
||||
|
||||
// Returns the relative path to the cgroup docker is running in.
|
||||
func GetThisCgroup(cgroupType string) (string, error) {
|
||||
output, err := ioutil.ReadFile("/proc/self/cgroup")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
for _, line := range strings.Split(string(output), "\n") {
|
||||
parts := strings.Split(line, ":")
|
||||
// any type used by docker should work
|
||||
if parts[1] == cgroupType {
|
||||
return parts[2], nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("cgroup '%s' not found in /proc/self/cgroup", cgroupType)
|
||||
}
|
||||
|
||||
// Returns a list of pids for the given container.
|
||||
func GetPidsForContainer(id string) ([]int, error) {
|
||||
pids := []int{}
|
||||
|
||||
// memory is chosen randomly, any cgroup used by docker works
|
||||
cgroupType := "memory"
|
||||
|
||||
cgroupRoot, err := FindCgroupMountpoint(cgroupType)
|
||||
if err != nil {
|
||||
return pids, err
|
||||
}
|
||||
|
||||
cgroupThis, err := GetThisCgroup(cgroupType)
|
||||
if err != nil {
|
||||
return pids, err
|
||||
}
|
||||
|
||||
filename := filepath.Join(cgroupRoot, cgroupThis, id, "tasks")
|
||||
if _, err := os.Stat(filename); os.IsNotExist(err) {
|
||||
// With more recent lxc versions use, cgroup will be in lxc/
|
||||
filename = filepath.Join(cgroupRoot, cgroupThis, "lxc", id, "tasks")
|
||||
}
|
||||
|
||||
output, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return pids, err
|
||||
}
|
||||
for _, p := range strings.Split(string(output), "\n") {
|
||||
if len(p) == 0 {
|
||||
continue
|
||||
}
|
||||
pid, err := strconv.Atoi(p)
|
||||
if err != nil {
|
||||
return pids, fmt.Errorf("Invalid pid '%s': %s", p, err)
|
||||
}
|
||||
pids = append(pids, pid)
|
||||
}
|
||||
return pids, nil
|
||||
}
|
||||
|
||||
@@ -316,6 +316,9 @@ func TestParseHost(t *testing.T) {
|
||||
if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "tcp://:7777"); err != nil || addr != "tcp://127.0.0.1:7777" {
|
||||
t.Errorf("tcp://:7777 -> expected tcp://127.0.0.1:7777, got %s", addr)
|
||||
}
|
||||
if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, ""); err != nil || addr != "unix:///var/run/docker.sock" {
|
||||
t.Errorf("empty argument -> expected unix:///var/run/docker.sock, got %s", addr)
|
||||
}
|
||||
if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "unix:///var/run/docker.sock"); err != nil || addr != "unix:///var/run/docker.sock" {
|
||||
t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr)
|
||||
}
|
||||
|
||||