Mirror of https://github.com/moby/moby.git (synced 2026-01-11 18:51:37 +00:00)
Compare commits: 00166d05d9 ... v1.8.2 (106 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 0a8c2e3717 | |
| | 57e9c4f7e5 | |
| | 3e8da36017 | |
| | 1cce9a26a3 | |
| | d7f8b4d43e | |
| | 6f7bbc3171 | |
| | 947087fb24 | |
| | ffe7e48ed6 | |
| | eeecd1cf59 | |
| | 3f411db15b | |
| | 789197f33d | |
| | 0c71d09921 | |
| | cc8320cb58 | |
| | c22b292719 | |
| | 14d2083f14 | |
| | ea56c5e1ce | |
| | 341ff018a2 | |
| | e07819293a | |
| | 6ec8d40ae7 | |
| | 16d64608f3 | |
| | 00a27b6872 | |
| | fc12b9ddce | |
| | b66e5ef208 | |
| | d12ea79c9d | |
| | a9aaa66780 | |
| | e19060dcea | |
| | b0e0dbb33b | |
| | 0d03096b65 | |
| | 55e9551aaa | |
| | c65afe6ba8 | |
| | 5745aaed22 | |
| | b6f0f93c94 | |
| | 33b16fef43 | |
| | 9705c349c5 | |
| | 3de34af5d1 | |
| | b3f3abfc94 | |
| | 783baec49c | |
| | c1d9e7c6fb | |
| | 83f6dbe30a | |
| | a97b89b585 | |
| | 29ea36a880 | |
| | ed672d1609 | |
| | 5916664220 | |
| | da4b336233 | |
| | 74df05ccaa | |
| | 2c875215b1 | |
| | be40a48c12 | |
| | 85f7f7cfc7 | |
| | e15f6fca3f | |
| | d3bbaa70cd | |
| | cc6f6cb2e2 | |
| | c967dd289f | |
| | 7895ec25ea | |
| | 5b06c94701 | |
| | 5851e2da60 | |
| | 9eff33735a | |
| | fc7697b050 | |
| | 1bf8954d0d | |
| | dfd9f5989a | |
| | d9581e861d | |
| | c383ceaf37 | |
| | 948912f692 | |
| | d19b1b927b | |
| | 53f5905379 | |
| | 60cbf4da6c | |
| | 183628388c | |
| | fbd2267e7d | |
| | a16ab243e5 | |
| | b3c3c4cddc | |
| | 0fe5aad984 | |
| | 0f5e2fd479 | |
| | 2f7145b1c5 | |
| | 81efe1f32e | |
| | 5ba75ac343 | |
| | 290987fcb4 | |
| | 98855c863d | |
| | b1f394a247 | |
| | a819a60a94 | |
| | 33cdc7f2c4 | |
| | 117860577c | |
| | b0ac5df367 | |
| | c109095a58 | |
| | d394113dfe | |
| | 2af7f63173 | |
| | f156fb7be5 | |
| | 559043b953 | |
| | ba8abcb3dd | |
| | ebf396c6e8 | |
| | 47d52fb872 | |
| | d167338876 | |
| | e6844381f0 | |
| | 589922adf0 | |
| | 689c4e6075 | |
| | 43da1adedb | |
| | 686fe02020 | |
| | 1d02be1c7a | |
| | edb60b950a | |
| | e0e852ee6f | |
| | b537508f8c | |
| | 37e886eb7b | |
| | 50f65742ef | |
| | 56d859d052 | |
| | 546a704c63 | |
| | fa85dc0030 | |
| | 36b6e5884d | |
| | 90991ddb9b | |
CHANGELOG.md (124 lines changed)
@@ -1,5 +1,129 @@
# Changelog

## 1.8.2 (2015-09-10)

### Distribution:

- Fixes rare edge case of handling GNU LongLink and LongName entries.
- Fix ^C on docker pull.
- Fix docker pull issues on client disconnection.
- Fix issue that caused the daemon to panic when loggers weren't configured properly.
- Fix goroutine leak pulling images from registry V2.

### Runtime:

- Fix a bug mounting cgroups for docker daemons running inside docker containers.
- Initialize log configuration properly.

### Client:

- Handle `-q` flag in `docker ps` properly when there is a default format.

### Networking:

- Fix several corner cases with netlink.

### Contrib:

- Fix several issues with bash completion.

## 1.8.1 (2015-08-12)

### Distribution

- Fix a bug where pushing multiple tags would result in invalid images

## 1.8.0 (2015-08-11)

### Distribution

+ Trusted pull, push and build, disabled by default
* Make tar layers deterministic between registries
* Don't allow deleting the image of running containers
* Check if a tag name to load is a valid digest
* Allow one-character repository names
* Add a more accurate error description for invalid tag name
* Make build cache ignore mtime

### Cli

+ Add support for DOCKER_CONFIG/--config to specify config file dir
+ Add --type flag for docker inspect command
+ Add formatting options to `docker ps` with `--format`
+ Replace `docker -d` with new subcommand `docker daemon`
* Zsh completion updates and improvements
* Add some missing events to bash completion
* Support daemon urls with base paths in `docker -H`
* Validate status= filter to docker ps
* Display when a container is in --net=host in docker ps
* Extend docker inspect to export image metadata related to graph driver
* Restore --default-gateway{,-v6} daemon options
* Add missing unpublished ports in docker ps
* Allow duration strings in `docker events` as --since/--until
* Expose more mounts information in `docker inspect`

### Runtime

+ Add new Fluentd logging driver
+ Allow `docker import` to load from local files
+ Add logging driver for GELF via UDP
+ Allow copying files from host to containers with `docker cp`
+ Promote volume drivers from experimental to master
+ Add rollover log driver, and --log-driver-opts flag
+ Add memory swappiness tuning options
* Remove cgroup read-only flag when privileged
* Make /proc, /sys, & /dev readonly for readonly containers
* Add cgroup bind mount by default
* Overlay: Export metadata for container and image in `docker inspect`
* Devicemapper: external device activation
* Devicemapper: Compare uuid of base device on startup
* Remove RC4 from the list of registry cipher suites
* Add syslog-facility option
* LXC execdriver compatibility with recent LXC versions
* Mark LXC execdriver as deprecated (to be removed with the migration to runc)

### Plugins

* Separate plugin sockets and specs locations
* Allow TLS connections to plugins

### Bug fixes

- Add missing 'Names' field to /containers/json API output
- Make `docker rmi --dangling` safe when pulling
- Devicemapper: Change default basesize to 100G
- Go Scheduler issue with sync.Mutex and gcc
- Fix issue where Search API endpoint would panic due to empty AuthConfig
- Set image canonical names correctly
- Check dockerinit only if lxc driver is used
- Fix ulimit usage of nproc
- Always attach STDIN if -i,--interactive is specified
- Show error messages when saving container state fails
- Fix incorrect assumption that --bridge=none disables networking
- Check for invalid port specifications in host configuration
- Fix endpoint leave failure for --net=host mode
- Fix goroutine leak in the stats API if the container is not running
- Check for apparmor file before reading it
- Fix DOCKER_TLS_VERIFY being ignored
- Set umask to the default on startup
- Correct the message when pausing or unpausing a non-running container
- Adjust disallowed CpuShares in container creation
- ZFS: correctly apply selinux context
- Display empty string instead of <nil> when IP opt is nil
- `docker kill` returns error when container is not running
- Fix COPY/ADD quoted/json form
- Fix goroutine leak on logs -f with no output
- Remove panic in nat package on invalid hostport
- Fix container linking in Fedora 22
- Fix error caused by using default gateways outside of the allocated range
- Format times in inspect command with a template as RFC3339Nano
- Make registry client accept 2xx and 3xx HTTP status responses as successful
- Fix race issue that caused the daemon to crash when certain layer downloads failed in a specific order.
- Fix error when the docker ps format was not valid.
- Remove redundant ip forward check.
- Fix issue trying to push images to repository mirrors.
- Fix error cleaning up network entrypoints when there is an initialization issue.

## 1.7.1 (2015-07-14)

#### Runtime
@@ -127,7 +127,7 @@ RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint
|
||||
RUN gem install --no-rdoc --no-ri fpm --version 1.3.2
|
||||
|
||||
# Install registry
|
||||
ENV REGISTRY_COMMIT 2317f721a3d8428215a2b65da4ae85212ed473b4
|
||||
ENV REGISTRY_COMMIT ec87e9b6971d831f0eff752ddb54fb64693e51cd
|
||||
RUN set -x \
|
||||
&& export GOPATH="$(mktemp -d)" \
|
||||
&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
|
||||
@@ -137,7 +137,7 @@ RUN set -x \
|
||||
&& rm -rf "$GOPATH"
|
||||
|
||||
# Install notary server
|
||||
ENV NOTARY_COMMIT 77bced079e83d80f40c1f0a544b1a8a3b97fb052
|
||||
ENV NOTARY_COMMIT 8e8122eb5528f621afcd4e2854c47302f17392f7
|
||||
RUN set -x \
|
||||
&& export GOPATH="$(mktemp -d)" \
|
||||
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
|
||||
|
||||
@@ -13,7 +13,7 @@ databases, and backend services without depending on a particular stack
|
||||
or provider.
|
||||
|
||||
Docker began as an open-source implementation of the deployment engine which
|
||||
powers [dotCloud](https://dotcloud.com), a popular Platform-as-a-Service.
|
||||
powers [dotCloud](https://www.dotcloud.com), a popular Platform-as-a-Service.
|
||||
It benefits directly from the experience accumulated over several years
|
||||
of large-scale operation and support of hundreds of thousands of
|
||||
applications and databases.
|
||||
|
||||
@@ -115,8 +115,9 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
|
||||
}
|
||||
|
||||
// Resolve the FROM lines in the Dockerfile to trusted digest references
|
||||
// using Notary.
|
||||
newDockerfile, err := rewriteDockerfileFrom(filepath.Join(contextDir, relDockerfile), cli.trustedReference)
|
||||
// using Notary. On a successful build, we must tag the resolved digests
|
||||
// to the original name specified in the Dockerfile.
|
||||
newDockerfile, resolvedTags, err := rewriteDockerfileFrom(filepath.Join(contextDir, relDockerfile), cli.trustedReference)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to process Dockerfile: %v", err)
|
||||
}
|
||||
@@ -291,7 +292,20 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
|
||||
}
|
||||
return Cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
|
||||
}
|
||||
return err
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Since the build was successful, now we must tag any of the resolved
|
||||
// images from the above Dockerfile rewrite.
|
||||
for _, resolved := range resolvedTags {
|
||||
if err := cli.tagTrusted(resolved.repoInfo, resolved.digestRef, resolved.tagRef); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getDockerfileRelPath uses the given context directory for a `docker build`
|
||||
@@ -302,6 +316,22 @@ func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDi
|
||||
return "", "", fmt.Errorf("unable to get absolute context directory: %v", err)
|
||||
}
|
||||
|
||||
// The context dir might be a symbolic link, so follow it to the actual
|
||||
// target directory.
|
||||
absContextDir, err = filepath.EvalSymlinks(absContextDir)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("unable to evaluate symlinks in context path: %v", err)
|
||||
}
|
||||
|
||||
stat, err := os.Lstat(absContextDir)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("unable to stat context directory %q: %v", absContextDir, err)
|
||||
}
|
||||
|
||||
if !stat.IsDir() {
|
||||
return "", "", fmt.Errorf("context must be a directory: %s", absContextDir)
|
||||
}
|
||||
|
||||
absDockerfile := givenDockerfile
|
||||
if absDockerfile == "" {
|
||||
// No -f/--file was specified so use the default relative to the
|
||||
@@ -467,14 +497,21 @@ func (td *trustedDockerfile) Close() error {
|
||||
return os.Remove(td.File.Name())
|
||||
}
|
||||
|
||||
// resolvedTag records the repository, tag, and resolved digest reference
|
||||
// from a Dockerfile rewrite.
|
||||
type resolvedTag struct {
|
||||
repoInfo *registry.RepositoryInfo
|
||||
digestRef, tagRef registry.Reference
|
||||
}
|
||||
|
||||
// rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in
|
||||
// "FROM <image>" instructions to a digest reference. `translator` is a
|
||||
// function that takes a repository name and tag reference and returns a
|
||||
// trusted digest reference.
|
||||
func rewriteDockerfileFrom(dockerfileName string, translator func(string, registry.Reference) (registry.Reference, error)) (newDockerfile *trustedDockerfile, err error) {
|
||||
func rewriteDockerfileFrom(dockerfileName string, translator func(string, registry.Reference) (registry.Reference, error)) (newDockerfile *trustedDockerfile, resolvedTags []*resolvedTag, err error) {
|
||||
dockerfile, err := os.Open(dockerfileName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to open Dockerfile: %v", err)
|
||||
return nil, nil, fmt.Errorf("unable to open Dockerfile: %v", err)
|
||||
}
|
||||
defer dockerfile.Close()
|
||||
|
||||
@@ -483,7 +520,7 @@ func rewriteDockerfileFrom(dockerfileName string, translator func(string, regist
|
||||
// Make a tempfile to store the rewritten Dockerfile.
|
||||
tempFile, err := ioutil.TempFile("", "trusted-dockerfile-")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to make temporary trusted Dockerfile: %v", err)
|
||||
return nil, nil, fmt.Errorf("unable to make temporary trusted Dockerfile: %v", err)
|
||||
}
|
||||
|
||||
trustedFile := &trustedDockerfile{
|
||||
@@ -509,21 +546,32 @@ func rewriteDockerfileFrom(dockerfileName string, translator func(string, regist
|
||||
if tag == "" {
|
||||
tag = tags.DEFAULTTAG
|
||||
}
|
||||
|
||||
repoInfo, err := registry.ParseRepositoryInfo(repo)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("unable to parse repository info: %v", err)
|
||||
}
|
||||
|
||||
ref := registry.ParseReference(tag)
|
||||
|
||||
if !ref.HasDigest() && isTrusted() {
|
||||
trustedRef, err := translator(repo, ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", trustedRef.ImageName(repo)))
|
||||
resolvedTags = append(resolvedTags, &resolvedTag{
|
||||
repoInfo: repoInfo,
|
||||
digestRef: trustedRef,
|
||||
tagRef: ref,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
n, err := fmt.Fprintln(tempFile, line)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
trustedFile.size += int64(n)
|
||||
@@ -531,7 +579,7 @@ func rewriteDockerfileFrom(dockerfileName string, translator func(string, regist
|
||||
|
||||
tempFile.Seek(0, os.SEEK_SET)
|
||||
|
||||
return trustedFile, scanner.Err()
|
||||
return trustedFile, resolvedTags, scanner.Err()
|
||||
}
|
||||
|
||||
// replaceDockerfileTarWrapper wraps the given input tar archive stream and
|
||||
|
||||
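The hunk above changes `rewriteDockerfileFrom` to return the set of resolved tags alongside the rewritten Dockerfile, so that after a successful build the CLI can re-tag the digest references back to the names used in the original Dockerfile. Below is a minimal, self-contained sketch of that rewriting idea; the regex, the naive tag split, the `translate` stub, and the simplified `resolvedTag` shape are illustrative stand-ins, not the real implementation.

```go
package main

import (
	"bufio"
	"fmt"
	"regexp"
	"strings"
)

// resolvedTag pairs the original repo/tag with the digest it resolved to
// (a simplified stand-in for the registry.Reference types in the diff).
type resolvedTag struct {
	repo, tag, digest string
}

var fromLine = regexp.MustCompile(`(?i)^\s*FROM\s+(\S+)`)

// rewriteFrom rewrites FROM lines to digest references using translate and
// records every resolution so the caller can re-tag after a successful build.
func rewriteFrom(dockerfile string, translate func(repo, tag string) (string, error)) (string, []resolvedTag, error) {
	var out strings.Builder
	var resolved []resolvedTag

	scanner := bufio.NewScanner(strings.NewReader(dockerfile))
	for scanner.Scan() {
		line := scanner.Text()
		if m := fromLine.FindStringSubmatch(line); m != nil && !strings.Contains(m[1], "@") {
			// Naive tag split for the sketch; real parsing lives in the registry package.
			repo, tag := m[1], "latest"
			if i := strings.LastIndex(m[1], ":"); i > 0 {
				repo, tag = m[1][:i], m[1][i+1:]
			}
			digest, err := translate(repo, tag)
			if err != nil {
				return "", nil, err
			}
			line = fmt.Sprintf("FROM %s@%s", repo, digest)
			resolved = append(resolved, resolvedTag{repo: repo, tag: tag, digest: digest})
		}
		fmt.Fprintln(&out, line)
	}
	return out.String(), resolved, scanner.Err()
}

func main() {
	df := "FROM debian:jessie\nRUN echo hello\n"
	rewritten, tags, err := rewriteFrom(df, func(repo, tag string) (string, error) {
		// Stand-in for a Notary lookup: return a fixed fake digest.
		return "sha256:deadbeef", nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(rewritten)
	fmt.Printf("resolved: %+v\n", tags)
}
```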
@@ -232,6 +232,20 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string) (er
|
||||
// Prepare destination copy info by stat-ing the container path.
|
||||
dstInfo := archive.CopyInfo{Path: dstPath}
|
||||
dstStat, err := cli.statContainerPath(dstContainer, dstPath)
|
||||
|
||||
// If the destination is a symbolic link, we should evaluate it.
|
||||
if err == nil && dstStat.Mode&os.ModeSymlink != 0 {
|
||||
linkTarget := dstStat.LinkTarget
|
||||
if !filepath.IsAbs(linkTarget) {
|
||||
// Join with the parent directory.
|
||||
dstParent, _ := archive.SplitPathDirEntry(dstPath)
|
||||
linkTarget = filepath.Join(dstParent, linkTarget)
|
||||
}
|
||||
|
||||
dstInfo.Path = linkTarget
|
||||
dstStat, err = cli.statContainerPath(dstContainer, linkTarget)
|
||||
}
|
||||
|
||||
// Ignore any error and assume that the parent directory of the destination
|
||||
// path exists, in which case the copy may still succeed. If there is any
|
||||
// type of conflict (e.g., non-directory overwriting an existing directory
|
||||
@@ -242,15 +256,26 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string) (er
|
||||
dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir()
|
||||
}
|
||||
|
||||
var content io.Reader
|
||||
var (
|
||||
content io.Reader
|
||||
resolvedDstPath string
|
||||
)
|
||||
|
||||
if srcPath == "-" {
|
||||
// Use STDIN.
|
||||
content = os.Stdin
|
||||
resolvedDstPath = dstInfo.Path
|
||||
if !dstInfo.IsDir {
|
||||
return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath))
|
||||
}
|
||||
} else {
|
||||
srcArchive, err := archive.TarResource(srcPath)
|
||||
// Prepare source copy info.
|
||||
srcInfo, err := archive.CopyInfoSourcePath(srcPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
srcArchive, err := archive.TarResource(srcInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -262,12 +287,6 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string) (er
|
||||
// it to the specified directory in the container we get the desired
|
||||
// copy behavior.
|
||||
|
||||
// Prepare source copy info.
|
||||
srcInfo, err := archive.CopyInfoStatPath(srcPath, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// See comments in the implementation of `archive.PrepareArchiveCopy`
|
||||
// for exactly what goes into deciding how and whether the source
|
||||
// archive needs to be altered for the correct copy behavior when it is
|
||||
@@ -280,12 +299,12 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string) (er
|
||||
}
|
||||
defer preparedArchive.Close()
|
||||
|
||||
dstPath = dstDir
|
||||
resolvedDstPath = dstDir
|
||||
content = preparedArchive
|
||||
}
|
||||
|
||||
query := make(url.Values, 2)
|
||||
query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API.
|
||||
query.Set("path", filepath.ToSlash(resolvedDstPath)) // Normalize the paths used in the API.
|
||||
// Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
|
||||
query.Set("noOverwriteDirNonDir", "true")
|
||||
|
||||
|
||||
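The copy-to-container hunk above stats the destination first and, if it turns out to be a symbolic link, re-stats the link target, joining a relative target onto the destination's parent directory. A hedged sketch of just that path arithmetic follows, using plain `os.Lstat`/`os.Readlink` on the local filesystem as a stand-in for `statContainerPath`.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolveDstSymlink mimics the destination handling above: if dstPath is a
// symbolic link, follow it once, resolving a relative target against the
// link's parent directory.
func resolveDstSymlink(dstPath string) (string, error) {
	fi, err := os.Lstat(dstPath)
	if err != nil {
		return "", err
	}
	if fi.Mode()&os.ModeSymlink == 0 {
		return dstPath, nil // not a link; use as-is
	}
	linkTarget, err := os.Readlink(dstPath)
	if err != nil {
		return "", err
	}
	if !filepath.IsAbs(linkTarget) {
		// Join with the parent directory, as the CLI does for container paths.
		linkTarget = filepath.Join(filepath.Dir(dstPath), linkTarget)
	}
	return linkTarget, nil
}

func main() {
	dir, _ := os.MkdirTemp("", "cp-demo")
	defer os.RemoveAll(dir)
	_ = os.Mkdir(filepath.Join(dir, "real"), 0o755)
	_ = os.Symlink("real", filepath.Join(dir, "link"))

	resolved, err := resolveDstSymlink(filepath.Join(dir, "link"))
	fmt.Println(resolved, err) // .../cp-demo/real <nil>
}
```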
@@ -95,7 +95,7 @@ func (cli *DockerCli) CmdPs(args ...string) error {
|
||||
|
||||
f := *format
|
||||
if len(f) == 0 {
|
||||
if len(cli.PsFormat()) > 0 {
|
||||
if len(cli.PsFormat()) > 0 && !*quiet {
|
||||
f = cli.PsFormat()
|
||||
} else {
|
||||
f = "table"
|
||||
|
||||
@@ -170,9 +170,11 @@ func customFormat(ctx Context, containers []types.Container) {
|
||||
format += "\t{{.Size}}"
|
||||
}
|
||||
|
||||
tmpl, err := template.New("ps template").Parse(format)
|
||||
tmpl, err := template.New("").Parse(format)
|
||||
if err != nil {
|
||||
buffer.WriteString(fmt.Sprintf("Invalid `docker ps` format: %v\n", err))
|
||||
buffer.WriteString(fmt.Sprintf("Template parsing error: %v\n", err))
|
||||
buffer.WriteTo(ctx.Output)
|
||||
return
|
||||
}
|
||||
|
||||
for _, container := range containers {
|
||||
@@ -181,8 +183,9 @@ func customFormat(ctx Context, containers []types.Container) {
|
||||
c: container,
|
||||
}
|
||||
if err := tmpl.Execute(buffer, containerCtx); err != nil {
|
||||
buffer = bytes.NewBufferString(fmt.Sprintf("Invalid `docker ps` format: %v\n", err))
|
||||
break
|
||||
buffer = bytes.NewBufferString(fmt.Sprintf("Template parsing error: %v\n", err))
|
||||
buffer.WriteTo(ctx.Output)
|
||||
return
|
||||
}
|
||||
if table && len(header) == 0 {
|
||||
header = containerCtx.fullHeader()
|
||||
|
||||
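The `docker ps` hunks above make the CLI ignore a configured default `psFormat` when `-q` is given, and report template failures as "Template parsing error" written straight to the output. The short sketch below shows the underlying `text/template` behaviour with a made-up container context type; note how an undefined function already fails at parse time, which is exactly the case the new `TestContainerPsFormatError` exercises.

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// psContext is a toy stand-in for the container context used by `docker ps`.
type psContext struct {
	ID, Image, Status string
}

func render(format string, c psContext) (string, error) {
	tmpl, err := template.New("").Parse(format)
	if err != nil {
		// Mirrors the "Template parsing error: ..." message in the hunk above.
		return "", fmt.Errorf("Template parsing error: %v", err)
	}
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, c); err != nil {
		return "", fmt.Errorf("Template parsing error: %v", err)
	}
	return buf.String(), nil
}

func main() {
	c := psContext{ID: "4bf0b1c", Image: "busybox", Status: "Up 2 minutes"}

	out, err := render("{{.ID}}\t{{.Image}}", c)
	fmt.Println(out, err)

	// An undefined function is rejected while parsing, before any container
	// is rendered.
	_, err = render("{{InvalidFunction}}", c)
	fmt.Println(err)
}
```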
@@ -1,6 +1,7 @@
|
||||
package ps
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -10,7 +11,7 @@ import (
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
)
|
||||
|
||||
func TestContainerContextID(t *testing.T) {
|
||||
func TestContainerPsContext(t *testing.T) {
|
||||
containerId := stringid.GenerateRandomID()
|
||||
unix := time.Now().Unix()
|
||||
|
||||
@@ -86,3 +87,16 @@ func TestContainerContextID(t *testing.T) {
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func TestContainerPsFormatError(t *testing.T) {
|
||||
out := bytes.NewBufferString("")
|
||||
ctx := Context{
|
||||
Format: "{{InvalidFunction}}",
|
||||
Output: out,
|
||||
}
|
||||
|
||||
customFormat(ctx, make([]types.Container, 0))
|
||||
if out.String() != "Template parsing error: template: :1: function \"InvalidFunction\" not defined\n" {
|
||||
t.Fatalf("Expected format error, got `%v`\n", out.String())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -176,11 +177,16 @@ func convertTarget(t client.Target) (target, error) {
|
||||
}
|
||||
|
||||
func (cli *DockerCli) getPassphraseRetriever() passphrase.Retriever {
|
||||
baseRetriever := passphrase.PromptRetrieverWithInOut(cli.in, cli.out)
|
||||
aliasMap := map[string]string{
|
||||
"root": "offline",
|
||||
"snapshot": "tagging",
|
||||
"targets": "tagging",
|
||||
}
|
||||
baseRetriever := passphrase.PromptRetrieverWithInOut(cli.in, cli.out, aliasMap)
|
||||
env := map[string]string{
|
||||
"root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"),
|
||||
"targets": os.Getenv("DOCKER_CONTENT_TRUST_TARGET_PASSPHRASE"),
|
||||
"snapshot": os.Getenv("DOCKER_CONTENT_TRUST_SNAPSHOT_PASSPHRASE"),
|
||||
"root": os.Getenv("DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE"),
|
||||
"snapshot": os.Getenv("DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE"),
|
||||
"targets": os.Getenv("DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE"),
|
||||
}
|
||||
return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) {
|
||||
if v := env[alias]; v != "" {
|
||||
@@ -311,6 +317,22 @@ func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registr
|
||||
return nil
|
||||
}
|
||||
|
||||
func selectKey(keys map[string]string) string {
|
||||
if len(keys) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
keyIDs := []string{}
|
||||
for k := range keys {
|
||||
keyIDs = append(keyIDs, k)
|
||||
}
|
||||
|
||||
// TODO(dmcgowan): let user choose if multiple keys, now pick consistently
|
||||
sort.Strings(keyIDs)
|
||||
|
||||
return keyIDs[0]
|
||||
}
|
||||
|
||||
func targetStream(in io.Writer) (io.WriteCloser, <-chan []target) {
|
||||
r, w := io.Pipe()
|
||||
out := io.MultiWriter(in, w)
|
||||
@@ -409,16 +431,13 @@ func (cli *DockerCli) trustedPush(repoInfo *registry.RepositoryInfo, tag string,
|
||||
|
||||
ks := repo.KeyStoreManager
|
||||
keys := ks.RootKeyStore().ListKeys()
|
||||
var rootKey string
|
||||
|
||||
if len(keys) == 0 {
|
||||
rootKey := selectKey(keys)
|
||||
if rootKey == "" {
|
||||
rootKey, err = ks.GenRootKey("ecdsa")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// TODO(dmcgowan): let user choose
|
||||
rootKey = keys[0]
|
||||
}
|
||||
|
||||
cryptoService, err := ks.GetRootCryptoService(rootKey)
|
||||
|
||||
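The trust hunk above renames the content-trust passphrase environment variables (offline/tagging) and passes an alias map into the Notary prompt retriever. Below is a rough sketch of the retriever pattern itself: an environment lookup per role that falls back to prompting. Only the `DOCKER_CONTENT_TRUST_*` variable names come from the diff; the `retriever` signature and `prompt` stub are illustrative.

```go
package main

import (
	"fmt"
	"os"
)

// retriever matches the shape of a passphrase retriever:
// it returns the passphrase, whether to give up, and an error.
type retriever func(keyName, alias string, createNew bool, numAttempts int) (string, bool, error)

// envRetriever checks role-specific environment variables first and only
// falls back to prompting when none is set -- the same layering used by
// getPassphraseRetriever in the hunk above.
func envRetriever(prompt retriever) retriever {
	env := map[string]string{
		"root":     os.Getenv("DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE"),
		"snapshot": os.Getenv("DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE"),
		"targets":  os.Getenv("DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE"),
	}
	return func(keyName, alias string, createNew bool, numAttempts int) (string, bool, error) {
		if v := env[alias]; v != "" {
			return v, false, nil
		}
		return prompt(keyName, alias, createNew, numAttempts)
	}
}

func main() {
	os.Setenv("DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE", "s3cret")

	r := envRetriever(func(keyName, alias string, createNew bool, numAttempts int) (string, bool, error) {
		return "", true, fmt.Errorf("no passphrase for %s (%s)", keyName, alias)
	})

	pass, giveUp, err := r("abc123", "targets", false, 0)
	fmt.Println(pass, giveUp, err) // s3cret false <nil>
}
```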
@@ -298,7 +298,13 @@ func (s *Server) postContainersKill(version version.Version, w http.ResponseWrit
|
||||
}
|
||||
|
||||
if err := s.daemon.ContainerKill(name, sig); err != nil {
|
||||
return err
|
||||
_, isStopped := err.(daemon.ErrContainerNotRunning)
|
||||
// Return any error that is not caused by the container being stopped.
|
||||
// Return error if the container is not running and the api is >= 1.20
|
||||
// to keep backwards compatibility.
|
||||
if version.GreaterThanOrEqualTo("1.20") || !isStopped {
|
||||
return fmt.Errorf("Cannot kill container %s: %v", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
|
||||
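The kill-endpoint hunk only surfaces an "already stopped" error to clients speaking API version 1.20 or later, detecting that case with a type assertion on `daemon.ErrContainerNotRunning` (defined in the last hunk of this diff). The sketch below reproduces that pattern; `apiVersionAtLeast` is a trivial stand-in, not a real Docker helper, and a real comparison would parse the dotted version numerically.

```go
package main

import (
	"errors"
	"fmt"
)

// ErrContainerNotRunning mirrors the typed error added in the daemon package.
type ErrContainerNotRunning struct{ id string }

func (e ErrContainerNotRunning) Error() string {
	return fmt.Sprintf("Container %s is not running", e.id)
}

// apiVersionAtLeast is a stand-in for version.GreaterThanOrEqualTo.
func apiVersionAtLeast(version, min string) bool { return version >= min }

// kill pretends the container is already stopped.
func kill(name string, sig uint64) error { return ErrContainerNotRunning{id: name} }

// handleKill applies the same rule as postContainersKill: old clients keep
// the lenient behaviour, clients >= 1.20 get the error back.
func handleKill(apiVersion, name string, sig uint64) error {
	if err := kill(name, sig); err != nil {
		var notRunning ErrContainerNotRunning
		isStopped := errors.As(err, &notRunning)
		if apiVersionAtLeast(apiVersion, "1.20") || !isStopped {
			return fmt.Errorf("Cannot kill container %s: %v", name, err)
		}
	}
	return nil
}

func main() {
	fmt.Println(handleKill("1.19", "web", 9)) // <nil>: suppressed for old API clients
	fmt.Println(handleKill("1.20", "web", 9)) // Cannot kill container web: ...
}
```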
@@ -109,7 +109,7 @@ func allocateDaemonPort(addr string) error {
|
||||
|
||||
func adjustCpuShares(version version.Version, hostConfig *runconfig.HostConfig) {
|
||||
if version.LessThan("1.19") {
|
||||
if hostConfig.CpuShares > 0 {
|
||||
if hostConfig != nil && hostConfig.CpuShares > 0 {
|
||||
// Handle unsupported CpuShares
|
||||
if hostConfig.CpuShares < linuxMinCpuShares {
|
||||
logrus.Warnf("Changing requested CpuShares of %d to minimum allowed of %d", hostConfig.CpuShares, linuxMinCpuShares)
|
||||
|
||||
@@ -86,7 +86,7 @@ type ImageInspect struct {
|
||||
Id string
|
||||
Parent string
|
||||
Comment string
|
||||
Created time.Time
|
||||
Created string
|
||||
Container string
|
||||
ContainerConfig *runconfig.Config
|
||||
DockerVersion string
|
||||
@@ -130,14 +130,13 @@ type CopyConfig struct {
|
||||
|
||||
// ContainerPathStat is used to encode the header from
|
||||
// GET /containers/{name:.*}/archive
|
||||
// "name" is the file or directory name.
|
||||
// "path" is the absolute path to the resource in the container.
|
||||
// "name" is basename of the resource.
|
||||
type ContainerPathStat struct {
|
||||
Name string `json:"name"`
|
||||
Path string `json:"path"`
|
||||
Size int64 `json:"size"`
|
||||
Mode os.FileMode `json:"mode"`
|
||||
Mtime time.Time `json:"mtime"`
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
Mode os.FileMode `json:"mode"`
|
||||
Mtime time.Time `json:"mtime"`
|
||||
LinkTarget string `json:"linkTarget"`
|
||||
}
|
||||
|
||||
// GET "/containers/{name:.*}/top"
|
||||
@@ -215,14 +214,14 @@ type ContainerState struct {
|
||||
Pid int
|
||||
ExitCode int
|
||||
Error string
|
||||
StartedAt time.Time
|
||||
FinishedAt time.Time
|
||||
StartedAt string
|
||||
FinishedAt string
|
||||
}
|
||||
|
||||
// GET "/containers/{name:.*}/json"
|
||||
type ContainerJSONBase struct {
|
||||
Id string
|
||||
Created time.Time
|
||||
Created string
|
||||
Path string
|
||||
Args []string
|
||||
State *ContainerState
|
||||
|
||||
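The `ContainerPathStat` change above drops the `path` field, makes `name` the base name of the resource, and adds `linkTarget`. A quick sketch of the JSON shape implied by those tags follows; the base64 step and the `X-Docker-Container-Path-Stat` header name are assumptions about how the stat travels in an archive response, not something shown in this diff.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"os"
	"time"
)

// containerPathStat mirrors the struct in the hunk above.
type containerPathStat struct {
	Name       string      `json:"name"`
	Size       int64       `json:"size"`
	Mode       os.FileMode `json:"mode"`
	Mtime      time.Time   `json:"mtime"`
	LinkTarget string      `json:"linkTarget"`
}

func main() {
	stat := containerPathStat{
		Name:       "app.log",
		Size:       2048,
		Mode:       0o644,
		Mtime:      time.Date(2015, 9, 10, 12, 0, 0, 0, time.UTC),
		LinkTarget: "",
	}

	raw, _ := json.Marshal(stat)
	fmt.Println(string(raw))

	// Assumed transport: base64 of the JSON, e.g. in an
	// X-Docker-Container-Path-Stat response header.
	fmt.Println(base64.StdEncoding.EncodeToString(raw))
}
```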
@@ -1,25 +0,0 @@
|
||||
#include <tunables/global>
|
||||
|
||||
profile docker-default flags=(attach_disconnected,mediate_deleted) {
|
||||
#include <abstractions/base>
|
||||
|
||||
network,
|
||||
capability,
|
||||
file,
|
||||
umount,
|
||||
|
||||
deny @{PROC}/sys/fs/** wklx,
|
||||
deny @{PROC}/sysrq-trigger rwklx,
|
||||
deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx,
|
||||
deny @{PROC}/sys/kernel/*/** wklx,
|
||||
|
||||
deny mount,
|
||||
|
||||
deny /sys/[^f]*/** wklx,
|
||||
deny /sys/f[^s]*/** wklx,
|
||||
deny /sys/fs/[^c]*/** wklx,
|
||||
deny /sys/fs/c[^g]*/** wklx,
|
||||
deny /sys/fs/cg[^r]*/** wklx,
|
||||
deny /sys/firmware/efi/efivars/** rwklx,
|
||||
deny /sys/kernel/security/** rwklx,
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
@{DOCKER_GRAPH_PATH}=/var/lib/docker
|
||||
|
||||
profile /usr/bin/docker (attach_disconnected) {
|
||||
profile /usr/bin/docker (attach_disconnected, complain) {
|
||||
# Prevent following links to these files during container setup.
|
||||
deny /etc/** mkl,
|
||||
deny /dev/** kl,
|
||||
@@ -21,51 +21,131 @@ profile /usr/bin/docker (attach_disconnected) {
|
||||
ipc rw,
|
||||
network,
|
||||
capability,
|
||||
file,
|
||||
owner /** rw,
|
||||
/var/lib/docker/** rwl,
|
||||
|
||||
# For non-root client use:
|
||||
/dev/urandom r,
|
||||
/run/docker.sock rw,
|
||||
/proc/** r,
|
||||
/sys/kernel/mm/hugepages/ r,
|
||||
/etc/localtime r,
|
||||
|
||||
ptrace peer=@{profile_name},
|
||||
ptrace (read) peer=docker-default,
|
||||
deny ptrace (trace) peer=docker-default,
|
||||
deny ptrace peer=/usr/bin/docker///bin/ps,
|
||||
|
||||
/usr/bin/docker pix,
|
||||
/sbin/xtables-multi rCix,
|
||||
/sbin/xtables-multi rCx,
|
||||
/sbin/iptables rCx,
|
||||
/sbin/modprobe rCx,
|
||||
/sbin/auplink rCx,
|
||||
/bin/kmod rCx,
|
||||
/usr/bin/xz rCx,
|
||||
/bin/ps rCx,
|
||||
/bin/cat rCx,
|
||||
/sbin/zfs rCx,
|
||||
|
||||
# Transitions
|
||||
change_profile -> docker-*,
|
||||
change_profile -> unconfined,
|
||||
|
||||
profile /sbin/iptables {
|
||||
signal (receive) peer=/usr/bin/docker,
|
||||
capability net_admin,
|
||||
}
|
||||
profile /sbin/auplink flags=(attach_disconnected) {
|
||||
signal (receive) peer=/usr/bin/docker,
|
||||
capability sys_admin,
|
||||
capability dac_override,
|
||||
profile /bin/cat (complain) {
|
||||
/etc/ld.so.cache r,
|
||||
/lib/** r,
|
||||
/dev/null rw,
|
||||
/proc r,
|
||||
/bin/cat mr,
|
||||
|
||||
@{DOCKER_GRAPH_PATH}/aufs/** rw,
|
||||
# For user namespaces:
|
||||
@{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/** rw,
|
||||
|
||||
# The following may be removed via delegates
|
||||
/sys/fs/aufs/** r,
|
||||
/lib/** r,
|
||||
/apparmor/.null r,
|
||||
/dev/null rw,
|
||||
/etc/ld.so.cache r,
|
||||
/sbin/auplink rm,
|
||||
/proc/fs/aufs/** rw,
|
||||
/proc/[0-9]*/mounts rw,
|
||||
# For reading in 'docker stats':
|
||||
/proc/[0-9]*/net/dev r,
|
||||
}
|
||||
profile /sbin/modprobe {
|
||||
signal (receive) peer=/usr/bin/docker,
|
||||
capability sys_module,
|
||||
file,
|
||||
profile /bin/ps (complain) {
|
||||
/etc/ld.so.cache r,
|
||||
/etc/localtime r,
|
||||
/etc/passwd r,
|
||||
/etc/nsswitch.conf r,
|
||||
/lib/** r,
|
||||
/proc/[0-9]*/** r,
|
||||
/dev/null rw,
|
||||
/bin/ps mr,
|
||||
|
||||
# We don't need ptrace so we'll deny and ignore the error.
|
||||
deny ptrace (read, trace),
|
||||
|
||||
# Quiet dac_override denials
|
||||
deny capability dac_override,
|
||||
deny capability dac_read_search,
|
||||
deny capability sys_ptrace,
|
||||
|
||||
/dev/tty r,
|
||||
/proc/stat r,
|
||||
/proc/cpuinfo r,
|
||||
/proc/meminfo r,
|
||||
/proc/uptime r,
|
||||
/sys/devices/system/cpu/online r,
|
||||
/proc/sys/kernel/pid_max r,
|
||||
/proc/ r,
|
||||
/proc/tty/drivers r,
|
||||
}
|
||||
profile /sbin/iptables (complain) {
|
||||
signal (receive) peer=/usr/bin/docker,
|
||||
capability net_admin,
|
||||
}
|
||||
profile /sbin/auplink flags=(attach_disconnected, complain) {
|
||||
signal (receive) peer=/usr/bin/docker,
|
||||
capability sys_admin,
|
||||
capability dac_override,
|
||||
|
||||
@{DOCKER_GRAPH_PATH}/aufs/** rw,
|
||||
@{DOCKER_GRAPH_PATH}/tmp/** rw,
|
||||
# For user namespaces:
|
||||
@{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/** rw,
|
||||
|
||||
/sys/fs/aufs/** r,
|
||||
/lib/** r,
|
||||
/apparmor/.null r,
|
||||
/dev/null rw,
|
||||
/etc/ld.so.cache r,
|
||||
/sbin/auplink rm,
|
||||
/proc/fs/aufs/** rw,
|
||||
/proc/[0-9]*/mounts rw,
|
||||
}
|
||||
profile /sbin/modprobe /bin/kmod (complain) {
|
||||
signal (receive) peer=/usr/bin/docker,
|
||||
capability sys_module,
|
||||
/etc/ld.so.cache r,
|
||||
/lib/** r,
|
||||
/dev/null rw,
|
||||
/apparmor/.null rw,
|
||||
/sbin/modprobe rm,
|
||||
/bin/kmod rm,
|
||||
/proc/cmdline r,
|
||||
/sys/module/** r,
|
||||
/etc/modprobe.d{/,/**} r,
|
||||
}
|
||||
# xz works via pipes, so we do not need access to the filesystem.
|
||||
profile /usr/bin/xz {
|
||||
signal (receive) peer=/usr/bin/docker,
|
||||
profile /usr/bin/xz (complain) {
|
||||
signal (receive) peer=/usr/bin/docker,
|
||||
/etc/ld.so.cache r,
|
||||
/lib/** r,
|
||||
/usr/bin/xz rm,
|
||||
deny /proc/** rw,
|
||||
deny /sys/** rw,
|
||||
}
|
||||
profile /sbin/xtables-multi (attach_disconnected, complain) {
|
||||
/etc/ld.so.cache r,
|
||||
/lib/** r,
|
||||
/sbin/xtables-multi rm,
|
||||
/apparmor/.null w,
|
||||
/dev/null rw,
|
||||
capability net_raw,
|
||||
capability net_admin,
|
||||
network raw,
|
||||
}
|
||||
profile /sbin/zfs (attach_disconnected, complain) {
|
||||
file,
|
||||
capability,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
FROM debian:jessie
|
||||
|
||||
RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
|
||||
RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ENV GO_VERSION 1.4.2
|
||||
RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
FROM debian:stretch
|
||||
|
||||
RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
|
||||
RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ENV GO_VERSION 1.4.2
|
||||
RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
FROM debian:wheezy
|
||||
RUN echo deb http://http.debian.net/debian wheezy-backports main > /etc/apt/sources.list.d/wheezy-backports.list
|
||||
|
||||
RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
|
||||
RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ENV GO_VERSION 1.4.2
|
||||
RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
|
||||
|
||||
@@ -50,7 +50,6 @@ for version in "${versions[@]}"; do
|
||||
build-essential # "essential for building Debian packages"
|
||||
curl ca-certificates # for downloading Go
|
||||
debhelper # for easy ".deb" building
|
||||
dh-apparmor # for apparmor debhelper
|
||||
dh-systemd # for systemd debhelper integration
|
||||
git # for "git commit" info in "docker -v"
|
||||
libapparmor-dev # for "sys/apparmor.h"
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"!
|
||||
#
|
||||
|
||||
FROM ubuntu-debootstrap:precise
|
||||
FROM ubuntu:precise
|
||||
|
||||
RUN apt-get update && apt-get install -y bash-completion build-essential curl ca-certificates debhelper dh-apparmor git libapparmor-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
|
||||
RUN apt-get update && apt-get install -y bash-completion build-essential curl ca-certificates debhelper git libapparmor-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ENV GO_VERSION 1.4.2
|
||||
RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
|
||||
@@ -2,9 +2,9 @@
|
||||
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"!
|
||||
#
|
||||
|
||||
FROM ubuntu-debootstrap:wily
|
||||
FROM ubuntu:trusty
|
||||
|
||||
RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
|
||||
RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ENV GO_VERSION 1.4.2
|
||||
RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
|
||||
@@ -2,9 +2,9 @@
|
||||
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"!
|
||||
#
|
||||
|
||||
FROM ubuntu-debootstrap:vivid
|
||||
FROM ubuntu:vivid
|
||||
|
||||
RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
|
||||
RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ENV GO_VERSION 1.4.2
|
||||
RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
|
||||
@@ -2,9 +2,9 @@
|
||||
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"!
|
||||
#
|
||||
|
||||
FROM ubuntu-debootstrap:trusty
|
||||
FROM ubuntu:wily
|
||||
|
||||
RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
|
||||
RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ENV GO_VERSION 1.4.2
|
||||
RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
|
||||
@@ -27,7 +27,7 @@
|
||||
# This order should be applied to lists, alternatives and code blocks.
|
||||
|
||||
__docker_q() {
|
||||
docker ${host:+-H "$host"} 2>/dev/null "$@"
|
||||
docker ${host:+-H "$host"} ${config:+--config "$config"} 2>/dev/null "$@"
|
||||
}
|
||||
|
||||
__docker_containers_all() {
|
||||
@@ -139,7 +139,7 @@ __docker_value_of_option() {
|
||||
local counter=$((command_pos + 1))
|
||||
while [ $counter -lt $cword ]; do
|
||||
case ${words[$counter]} in
|
||||
$option_glob )
|
||||
@($option_glob) )
|
||||
echo ${words[$counter + 1]}
|
||||
break
|
||||
;;
|
||||
@@ -229,11 +229,12 @@ __docker_log_driver_options() {
|
||||
# see docs/reference/logging/index.md
|
||||
local fluentd_options="fluentd-address fluentd-tag"
|
||||
local gelf_options="gelf-address gelf-tag"
|
||||
local json_file_options="max-file max-size"
|
||||
local syslog_options="syslog-address syslog-facility syslog-tag"
|
||||
|
||||
case $(__docker_value_of_option --log-driver) in
|
||||
'')
|
||||
COMPREPLY=( $( compgen -W "$fluentd_options $gelf_options $syslog_options" -S = -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "$fluentd_options $gelf_options $json_file_options $syslog_options" -S = -- "$cur" ) )
|
||||
;;
|
||||
fluentd)
|
||||
COMPREPLY=( $( compgen -W "$fluentd_options" -S = -- "$cur" ) )
|
||||
@@ -241,6 +242,9 @@ __docker_log_driver_options() {
|
||||
gelf)
|
||||
COMPREPLY=( $( compgen -W "$gelf_options" -S = -- "$cur" ) )
|
||||
;;
|
||||
json-file)
|
||||
COMPREPLY=( $( compgen -W "$json_file_options" -S = -- "$cur" ) )
|
||||
;;
|
||||
syslog)
|
||||
COMPREPLY=( $( compgen -W "$syslog_options" -S = -- "$cur" ) )
|
||||
;;
|
||||
@@ -295,6 +299,10 @@ __docker_complete_log_driver_options() {
|
||||
return 1
|
||||
}
|
||||
|
||||
__docker_log_levels() {
|
||||
COMPREPLY=( $( compgen -W "debug info warn error fatal" -- "$cur" ) )
|
||||
}
|
||||
|
||||
# a selection of the available signals that is most likely of interest in the
|
||||
# context of docker containers.
|
||||
__docker_signals() {
|
||||
@@ -312,61 +320,34 @@ __docker_signals() {
|
||||
COMPREPLY=( $( compgen -W "${signals[*]} ${signals[*]#SIG}" -- "$( echo $cur | tr '[:lower:]' '[:upper:]')" ) )
|
||||
}
|
||||
|
||||
# global options that may appear after the docker command
|
||||
_docker_docker() {
|
||||
local boolean_options="
|
||||
--daemon -d
|
||||
--debug -D
|
||||
--help -h
|
||||
--icc
|
||||
--ip-forward
|
||||
--ip-masq
|
||||
--iptables
|
||||
--ipv6
|
||||
--selinux-enabled
|
||||
--tls
|
||||
--tlsverify
|
||||
--userland-proxy=false
|
||||
$global_boolean_options
|
||||
--help
|
||||
--version -v
|
||||
"
|
||||
|
||||
case "$prev" in
|
||||
--exec-root|--graph|-g)
|
||||
--config)
|
||||
_filedir -d
|
||||
return
|
||||
;;
|
||||
--log-driver)
|
||||
__docker_log_drivers
|
||||
return
|
||||
;;
|
||||
--log-level|-l)
|
||||
COMPREPLY=( $( compgen -W "debug info warn error fatal" -- "$cur" ) )
|
||||
__docker_log_levels
|
||||
return
|
||||
;;
|
||||
--log-opt)
|
||||
__docker_log_driver_options
|
||||
return
|
||||
;;
|
||||
--pidfile|-p|--tlscacert|--tlscert|--tlskey)
|
||||
_filedir
|
||||
return
|
||||
;;
|
||||
--storage-driver|-s)
|
||||
COMPREPLY=( $( compgen -W "aufs devicemapper btrfs overlay" -- "$(echo $cur | tr '[:upper:]' '[:lower:]')" ) )
|
||||
return
|
||||
;;
|
||||
$main_options_with_args_glob )
|
||||
$(__docker_to_extglob "$global_options_with_args") )
|
||||
return
|
||||
;;
|
||||
esac
|
||||
|
||||
__docker_complete_log_driver_options && return
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "$boolean_options $main_options_with_args" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "$boolean_options $global_options_with_args" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
local counter="$(__docker_pos_first_nonflag $main_options_with_args_glob)"
|
||||
local counter=$( __docker_pos_first_nonflag $(__docker_to_extglob "$global_options_with_args") )
|
||||
if [ $cword -eq $counter ]; then
|
||||
COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) )
|
||||
fi
|
||||
@@ -478,6 +459,137 @@ _docker_create() {
|
||||
_docker_run
|
||||
}
|
||||
|
||||
_docker_daemon() {
|
||||
local boolean_options="
|
||||
$global_boolean_options
|
||||
--help
|
||||
--icc=false
|
||||
--ip-forward=false
|
||||
--ip-masq=false
|
||||
--iptables=false
|
||||
--ipv6
|
||||
--selinux-enabled
|
||||
--userland-proxy=false
|
||||
"
|
||||
local options_with_args="
|
||||
$global_options_with_args
|
||||
--api-cors-header
|
||||
--bip
|
||||
--bridge -b
|
||||
--default-gateway
|
||||
--default-gateway-v6
|
||||
--default-ulimit
|
||||
--dns
|
||||
--dns-search
|
||||
--exec-driver -e
|
||||
--exec-opt
|
||||
--exec-root
|
||||
--fixed-cidr
|
||||
--fixed-cidr-v6
|
||||
--graph -g
|
||||
--group -G
|
||||
--insecure-registry
|
||||
--ip
|
||||
--label
|
||||
--log-driver
|
||||
--log-opt
|
||||
--mtu
|
||||
--pidfile -p
|
||||
--registry-mirror
|
||||
--storage-driver -s
|
||||
--storage-opt
|
||||
"
|
||||
|
||||
case "$prev" in
|
||||
--exec-root|--graph|-g)
|
||||
_filedir -d
|
||||
return
|
||||
;;
|
||||
--log-driver)
|
||||
__docker_log_drivers
|
||||
return
|
||||
;;
|
||||
--pidfile|-p|--tlscacert|--tlscert|--tlskey)
|
||||
_filedir
|
||||
return
|
||||
;;
|
||||
--storage-driver|-s)
|
||||
COMPREPLY=( $( compgen -W "aufs btrfs devicemapper overlay vfs zfs" -- "$(echo $cur | tr '[:upper:]' '[:lower:]')" ) )
|
||||
return
|
||||
;;
|
||||
--storage-opt)
|
||||
local devicemapper_options="
|
||||
dm.basesize
|
||||
dm.blkdiscard
|
||||
dm.blocksize
|
||||
dm.fs
|
||||
dm.loopdatasize
|
||||
dm.loopmetadatasize
|
||||
dm.mkfsarg
|
||||
dm.mountopt
|
||||
dm.override_udev_sync_check
|
||||
dm.thinpooldev
|
||||
"
|
||||
local zfs_options="zfs.fsname"
|
||||
|
||||
case $(__docker_value_of_option '--storage-driver|-s') in
|
||||
'')
|
||||
COMPREPLY=( $( compgen -W "$devicemapper_options $zfs_options" -S = -- "$cur" ) )
|
||||
;;
|
||||
devicemapper)
|
||||
COMPREPLY=( $( compgen -W "$devicemapper_options" -S = -- "$cur" ) )
|
||||
;;
|
||||
zfs)
|
||||
COMPREPLY=( $( compgen -W "$zfs_options" -S = -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
return
|
||||
;;
|
||||
esac
|
||||
compopt -o nospace
|
||||
return
|
||||
;;
|
||||
--log-level|-l)
|
||||
__docker_log_levels
|
||||
return
|
||||
;;
|
||||
--log-opt)
|
||||
__docker_log_driver_options
|
||||
return
|
||||
;;
|
||||
$(__docker_to_extglob "$options_with_args") )
|
||||
return
|
||||
;;
|
||||
esac
|
||||
|
||||
__docker_complete_log_driver_options && return
|
||||
|
||||
case "${words[$cword-2]}$prev=" in
|
||||
*dm.blkdiscard=*)
|
||||
COMPREPLY=( $( compgen -W "false true" -- "${cur#=}" ) )
|
||||
return
|
||||
;;
|
||||
*dm.fs=*)
|
||||
COMPREPLY=( $( compgen -W "ext4 xfs" -- "${cur#=}" ) )
|
||||
return
|
||||
;;
|
||||
*dm.override_udev_sync_check=*)
|
||||
COMPREPLY=( $( compgen -W "false true" -- "${cur#=}" ) )
|
||||
return
|
||||
;;
|
||||
*dm.thinpooldev=*)
|
||||
_filedir
|
||||
return
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
_docker_diff() {
|
||||
case "$cur" in
|
||||
-*)
|
||||
@@ -685,8 +797,17 @@ _docker_inspect() {
|
||||
COMPREPLY=( $( compgen -W "--format -f --type --help" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
__docker_containers_and_images
|
||||
;;
|
||||
case $(__docker_value_of_option --type) in
|
||||
'')
|
||||
__docker_containers_and_images
|
||||
;;
|
||||
container)
|
||||
__docker_containers_all
|
||||
;;
|
||||
image)
|
||||
__docker_image_repos_and_tags_and_ids
|
||||
;;
|
||||
esac
|
||||
esac
|
||||
}
|
||||
|
||||
@@ -803,7 +924,7 @@ _docker_ps() {
|
||||
compopt -o nospace
|
||||
return
|
||||
;;
|
||||
-n)
|
||||
--format|-n)
|
||||
return
|
||||
;;
|
||||
esac
|
||||
@@ -827,7 +948,7 @@ _docker_ps() {
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--all -a --before --filter -f --help --latest -l -n --no-trunc --quiet -q --size -s --since" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "--all -a --before --filter -f --format --help --latest -l -n --no-trunc --quiet -q --size -s --since" -- "$cur" ) )
|
||||
;;
|
||||
esac
|
||||
}
|
||||
@@ -932,15 +1053,16 @@ _docker_rmi() {
|
||||
_docker_run() {
|
||||
local options_with_args="
|
||||
--add-host
|
||||
--blkio-weight
|
||||
--attach -a
|
||||
--blkio-weight
|
||||
--cap-add
|
||||
--cap-drop
|
||||
--cgroup-parent
|
||||
--cidfile
|
||||
--cpuset
|
||||
--cpu-period
|
||||
--cpu-quota
|
||||
--cpuset-cpus
|
||||
--cpuset-mems
|
||||
--cpu-shares -c
|
||||
--device
|
||||
--dns
|
||||
@@ -952,8 +1074,8 @@ _docker_run() {
|
||||
--group-add
|
||||
--hostname -h
|
||||
--ipc
|
||||
--label -l
|
||||
--label-file
|
||||
--label -l
|
||||
--link
|
||||
--log-driver
|
||||
--log-opt
|
||||
@@ -961,14 +1083,15 @@ _docker_run() {
|
||||
--mac-address
|
||||
--memory -m
|
||||
--memory-swap
|
||||
--memory-swappiness
|
||||
--name
|
||||
--net
|
||||
--pid
|
||||
--publish -p
|
||||
--restart
|
||||
--security-opt
|
||||
--user -u
|
||||
--ulimit
|
||||
--user -u
|
||||
--uts
|
||||
--volumes-from
|
||||
--volume -v
|
||||
@@ -976,8 +1099,10 @@ _docker_run() {
|
||||
"
|
||||
|
||||
local all_options="$options_with_args
|
||||
--disable-content-trust=false
|
||||
--help
|
||||
--interactive -i
|
||||
--oom-kill-disable
|
||||
--privileged
|
||||
--publish-all -P
|
||||
--read-only
|
||||
@@ -987,7 +1112,7 @@ _docker_run() {
|
||||
[ "$command" = "run" ] && all_options="$all_options
|
||||
--detach -d
|
||||
--rm
|
||||
--sig-proxy
|
||||
--sig-proxy=false
|
||||
"
|
||||
|
||||
local options_with_args_glob=$(__docker_to_extglob "$options_with_args")
|
||||
@@ -1287,6 +1412,7 @@ _docker() {
|
||||
commit
|
||||
cp
|
||||
create
|
||||
daemon
|
||||
diff
|
||||
events
|
||||
exec
|
||||
@@ -1323,41 +1449,23 @@ _docker() {
|
||||
wait
|
||||
)
|
||||
|
||||
local main_options_with_args="
|
||||
--api-cors-header
|
||||
--bip
|
||||
--bridge -b
|
||||
--default-gateway
|
||||
--default-gateway-v6
|
||||
--default-ulimit
|
||||
--dns
|
||||
--dns-search
|
||||
--exec-driver -e
|
||||
--exec-opt
|
||||
--exec-root
|
||||
--fixed-cidr
|
||||
--fixed-cidr-v6
|
||||
--graph -g
|
||||
--group -G
|
||||
# These options are valid as global options for all client commands
|
||||
# and valid as command options for `docker daemon`
|
||||
local global_boolean_options="
|
||||
--debug -D
|
||||
--tls
|
||||
--tlsverify
|
||||
"
|
||||
local global_options_with_args="
|
||||
--config
|
||||
--host -H
|
||||
--insecure-registry
|
||||
--ip
|
||||
--label
|
||||
--log-driver
|
||||
--log-level -l
|
||||
--log-opt
|
||||
--mtu
|
||||
--pidfile -p
|
||||
--registry-mirror
|
||||
--storage-driver -s
|
||||
--storage-opt
|
||||
--tlscacert
|
||||
--tlscert
|
||||
--tlskey
|
||||
"
|
||||
|
||||
local main_options_with_args_glob=$(__docker_to_extglob "$main_options_with_args")
|
||||
local host
|
||||
local host config
|
||||
|
||||
COMPREPLY=()
|
||||
local cur prev words cword
|
||||
@@ -1372,7 +1480,12 @@ _docker() {
|
||||
(( counter++ ))
|
||||
host="${words[$counter]}"
|
||||
;;
|
||||
$main_options_with_args_glob )
|
||||
# save config so that completion can use custom configuration directories
|
||||
--config)
|
||||
(( counter++ ))
|
||||
config="${words[$counter]}"
|
||||
;;
|
||||
$(__docker_to_extglob "$global_options_with_args") )
|
||||
(( counter++ ))
|
||||
;;
|
||||
-*)
|
||||
|
||||
@@ -5,6 +5,7 @@ After=network.target docker.socket
|
||||
Requires=docker.socket
|
||||
|
||||
[Service]
|
||||
Type=notify
|
||||
ExecStart=/usr/bin/docker daemon -H fd://
|
||||
MountFlags=slave
|
||||
LimitNOFILE=1048576
|
||||
|
||||
@@ -70,6 +70,66 @@ func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNon
|
||||
return container.ExtractToDir(path, noOverwriteDirNonDir, content)
|
||||
}
|
||||
|
||||
// resolvePath resolves the given path in the container to a resource on the
|
||||
// host. Returns a resolved path (absolute path to the resource on the host),
|
||||
// the absolute path to the resource relative to the container's rootfs, and
|
||||
// an error if the path points outside the container's rootfs.
|
||||
func (container *Container) resolvePath(path string) (resolvedPath, absPath string, err error) {
|
||||
// Consider the given path as an absolute path in the container.
|
||||
absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)
|
||||
|
||||
// Split the absPath into its Directory and Base components. We will
|
||||
// resolve the dir in the scope of the container then append the base.
|
||||
dirPath, basePath := filepath.Split(absPath)
|
||||
|
||||
resolvedDirPath, err := container.GetResourcePath(dirPath)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
// resolvedDirPath will have been cleaned (no trailing path separators) so
|
||||
// we can manually join it with the base path element.
|
||||
resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
|
||||
|
||||
return resolvedPath, absPath, nil
|
||||
}
|
||||
|
||||
// statPath is the unexported version of StatPath. Locks and mounts should
|
||||
// be acquired before calling this method and the given path should be fully
|
||||
// resolved to a path on the host corresponding to the given absolute path
|
||||
// inside the container.
|
||||
func (container *Container) statPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) {
|
||||
lstat, err := os.Lstat(resolvedPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var linkTarget string
|
||||
if lstat.Mode()&os.ModeSymlink != 0 {
|
||||
// Fully evaluate the symlink in the scope of the container rootfs.
|
||||
hostPath, err := container.GetResourcePath(absPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
linkTarget, err = filepath.Rel(container.basefs, hostPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Make it an absolute path.
|
||||
linkTarget = filepath.Join(string(filepath.Separator), linkTarget)
|
||||
}
|
||||
|
||||
return &types.ContainerPathStat{
|
||||
Name: filepath.Base(absPath),
|
||||
Size: lstat.Size(),
|
||||
Mode: lstat.Mode(),
|
||||
Mtime: lstat.ModTime(),
|
||||
LinkTarget: linkTarget,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// StatPath stats the filesystem resource at the specified path in this
|
||||
// container. Returns stat info about the resource.
|
||||
func (container *Container) StatPath(path string) (stat *types.ContainerPathStat, err error) {
|
||||
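`resolvePath` above deliberately resolves only the parent directory inside the container scope and then re-appends the final path element, so that a symlink as the last element is stat'd as a link rather than followed. The sketch below demonstrates the same split-resolve-rejoin trick against a plain root directory on the host; `scopedResolve` is an illustrative stand-in for `container.GetResourcePath`.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// scopedResolve is a toy stand-in for container.GetResourcePath: it joins the
// path onto root and evaluates symlinks. (The real implementation scopes the
// evaluation so the result cannot escape the container rootfs.)
func scopedResolve(root, p string) (string, error) {
	return filepath.EvalSymlinks(filepath.Join(root, p))
}

// resolvePath resolves only the directory part, then manually re-appends the
// base, so a final symlink element is *not* followed (matching the hunk above).
func resolvePath(root, p string) (string, error) {
	abs := filepath.Join(string(filepath.Separator), p)
	dir, base := filepath.Split(abs)
	resolvedDir, err := scopedResolve(root, dir)
	if err != nil {
		return "", err
	}
	return resolvedDir + string(filepath.Separator) + base, nil
}

func main() {
	root, _ := os.MkdirTemp("", "rootfs")
	defer os.RemoveAll(root)
	_ = os.Mkdir(filepath.Join(root, "data"), 0o755)
	_ = os.Symlink("data", filepath.Join(root, "latest"))

	p, err := resolvePath(root, "/latest")
	// The final element stays a symlink path under root; os.Lstat on it would
	// see the link itself, just like statPath does.
	fmt.Println(p, err)
}
```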
@@ -87,39 +147,12 @@ func (container *Container) StatPath(path string) (stat *types.ContainerPathStat
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Consider the given path as an absolute path in the container.
|
||||
absPath := path
|
||||
if !filepath.IsAbs(absPath) {
|
||||
absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join("/", path), path)
|
||||
}
|
||||
|
||||
resolvedPath, err := container.GetResourcePath(absPath)
|
||||
resolvedPath, absPath, err := container.resolvePath(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// A trailing "." or separator has important meaning. For example, if
|
||||
// `"foo"` is a symlink to some directory `"dir"`, then `os.Lstat("foo")`
|
||||
// will stat the link itself, while `os.Lstat("foo/")` will stat the link
|
||||
// target. If the basename of the path is ".", it means to archive the
|
||||
// contents of the directory with "." as the first path component rather
|
||||
// than the name of the directory. This would cause extraction of the
|
||||
// archive to *not* make another directory, but instead use the current
|
||||
// directory.
|
||||
resolvedPath = archive.PreserveTrailingDotOrSeparator(resolvedPath, absPath)
|
||||
|
||||
lstat, err := os.Lstat(resolvedPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &types.ContainerPathStat{
|
||||
Name: lstat.Name(),
|
||||
Path: absPath,
|
||||
Size: lstat.Size(),
|
||||
Mode: lstat.Mode(),
|
||||
Mtime: lstat.ModTime(),
|
||||
}, nil
|
||||
return container.statPath(resolvedPath, absPath)
|
||||
}
|
||||
|
||||
// ArchivePath creates an archive of the filesystem resource at the specified
|
||||
@@ -154,41 +187,25 @@ func (container *Container) ArchivePath(path string) (content io.ReadCloser, sta
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Consider the given path as an absolute path in the container.
|
||||
absPath := path
|
||||
if !filepath.IsAbs(absPath) {
|
||||
absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join("/", path), path)
|
||||
}
|
||||
|
||||
resolvedPath, err := container.GetResourcePath(absPath)
|
||||
resolvedPath, absPath, err := container.resolvePath(path)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// A trailing "." or separator has important meaning. For example, if
|
||||
// `"foo"` is a symlink to some directory `"dir"`, then `os.Lstat("foo")`
|
||||
// will stat the link itself, while `os.Lstat("foo/")` will stat the link
|
||||
// target. If the basename of the path is ".", it means to archive the
|
||||
// contents of the directory with "." as the first path component rather
|
||||
// than the name of the directory. This would cause extraction of the
|
||||
// archive to *not* make another directory, but instead use the current
|
||||
// directory.
|
||||
resolvedPath = archive.PreserveTrailingDotOrSeparator(resolvedPath, absPath)
|
||||
|
||||
lstat, err := os.Lstat(resolvedPath)
|
||||
stat, err = container.statPath(resolvedPath, absPath)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
stat = &types.ContainerPathStat{
|
||||
Name: lstat.Name(),
|
||||
Path: absPath,
|
||||
Size: lstat.Size(),
|
||||
Mode: lstat.Mode(),
|
||||
Mtime: lstat.ModTime(),
|
||||
}
|
||||
|
||||
data, err := archive.TarResource(resolvedPath)
|
||||
// We need to rebase the archive entries if the last element of the
|
||||
// resolved path was a symlink that was evaluated and is now different
|
||||
// than the requested path. For example, if the given path was "/foo/bar/",
|
||||
// but it resolved to "/var/lib/docker/containers/{id}/foo/baz/", we want
|
||||
// to ensure that the archive entries start with "bar" and not "baz". This
|
||||
// also catches the case when the root directory of the container is
|
||||
// requested: we want the archive entries to start with "/" and not the
|
||||
// container ID.
|
||||
data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -227,27 +244,21 @@ func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool,
|
||||
return err
|
||||
}
|
||||
|
||||
// Consider the given path as an absolute path in the container.
|
||||
absPath := path
|
||||
if !filepath.IsAbs(absPath) {
|
||||
absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join("/", path), path)
|
||||
}
|
||||
// The destination path needs to be resolved to a host path, with all
|
||||
// symbolic links followed in the scope of the container's rootfs. Note
|
||||
// that we do not use `container.resolvePath(path)` here because we need
|
||||
// to also evaluate the last path element if it is a symlink. This is so
|
||||
// that you can extract an archive to a symlink that points to a directory.
|
||||
|
||||
// Consider the given path as an absolute path in the container.
|
||||
absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)
|
||||
|
||||
// This will evaluate the last path element if it is a symlink.
|
||||
resolvedPath, err := container.GetResourcePath(absPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// A trailing "." or separator has important meaning. For example, if
|
||||
// `"foo"` is a symlink to some directory `"dir"`, then `os.Lstat("foo")`
|
||||
// will stat the link itself, while `os.Lstat("foo/")` will stat the link
|
||||
// target. If the basename of the path is ".", it means to archive the
|
||||
// contents of the directory with "." as the first path component rather
|
||||
// than the name of the directory. This would cause extraction of the
|
||||
// archive to *not* make another directory, but instead use the current
|
||||
// directory.
|
||||
resolvedPath = archive.PreserveTrailingDotOrSeparator(resolvedPath, absPath)
|
||||
|
||||
stat, err := os.Lstat(resolvedPath)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -257,23 +268,23 @@ func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool,
|
||||
return ErrExtractPointNotDirectory
|
||||
}
|
||||
|
||||
// Need to check if the path is in a volume. If it is, it cannot be in a
|
||||
// read-only volume. If it is not in a volume, the container cannot be
|
||||
// configured with a read-only rootfs.
|
||||
|
||||
// Use the resolved path relative to the container rootfs as the new
|
||||
// absPath. This way we fully follow any symlinks in a volume that may
|
||||
// lead back outside the volume.
|
||||
baseRel, err := filepath.Rel(container.basefs, resolvedPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
absPath = filepath.Join("/", baseRel)
|
||||
// Make it an absolute path.
|
||||
absPath = filepath.Join(string(filepath.Separator), baseRel)
|
||||
|
||||
// Need to check if the path is in a volume. If it is, it cannot be in a
|
||||
// read-only volume. If it is not in a volume, the container cannot be
|
||||
// configured with a read-only rootfs.
|
||||
var toVolume bool
|
||||
for _, mnt := range container.MountPoints {
|
||||
if toVolume = mnt.hasResource(absPath); toVolume {
|
||||
if mnt.RW {
|
||||
break
|
||||
}
|
||||
return ErrVolumeReadonly
|
||||
}
|
||||
toVolume, err := checkIfPathIsInAVolume(container, absPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !toVolume && container.hostConfig.ReadonlyRootfs {
|
||||
@@ -295,3 +306,19 @@ func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool,
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it
|
||||
// cannot be in a read-only volume. If it is not in a volume, the container
|
||||
// cannot be configured with a read-only rootfs.
|
||||
func checkIfPathIsInAVolume(container *Container, absPath string) (bool, error) {
|
||||
var toVolume bool
|
||||
for _, mnt := range container.MountPoints {
|
||||
if toVolume = mnt.hasResource(absPath); toVolume {
|
||||
if mnt.RW {
|
||||
break
|
||||
}
|
||||
return false, ErrVolumeReadonly
|
||||
}
|
||||
}
|
||||
return toVolume, nil
|
||||
}
|
||||
|
||||
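The new `checkIfPathIsInAVolume` helper above factors the read-only-volume check out of `ExtractToDir`. The `hasResource` call it relies on amounts, roughly, to asking whether the container-absolute path lives under a mount's destination; the sketch below is an assumption about that semantics rather than the daemon's actual implementation, using an illustrative destination:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// Rough stand-in for mountPoint.hasResource: does absPath live under the
// mount's Destination? This is an assumption about the semantics, not the
// daemon's actual implementation.
func hasResource(destination, absPath string) bool {
	rel, err := filepath.Rel(destination, absPath)
	if err != nil {
		return false
	}
	return rel != ".." && !strings.HasPrefix(rel, ".."+string(filepath.Separator))
}

func main() {
	fmt.Println(hasResource("/data", "/data/logs/app.log")) // true
	fmt.Println(hasResource("/data", "/etc/hosts"))         // false
	fmt.Println(hasResource("/data", "/data"))              // true
}
```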
@@ -41,6 +41,14 @@ var (
|
||||
ErrContainerRootfsReadonly = errors.New("container rootfs is marked read-only")
|
||||
)
|
||||
|
||||
type ErrContainerNotRunning struct {
|
||||
id string
|
||||
}
|
||||
|
||||
func (e ErrContainerNotRunning) Error() string {
|
||||
return fmt.Sprintf("Container %s is not running", e.id)
|
||||
}
|
||||
|
||||
type StreamConfig struct {
|
||||
stdout *broadcastwriter.BroadcastWriter
|
||||
stderr *broadcastwriter.BroadcastWriter
|
||||
@@ -371,7 +379,7 @@ func (container *Container) KillSig(sig int) error {
|
||||
}
|
||||
|
||||
if !container.Running {
|
||||
return fmt.Errorf("Container %s is not running", container.ID)
|
||||
return ErrContainerNotRunning{container.ID}
|
||||
}
|
||||
|
||||
// signal to the monitor that it should not restart the container
|
||||
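The hunks above replace string-built errors with the `ErrContainerNotRunning` type, so callers can branch on the error's type instead of matching message text. A self-contained sketch using a simplified stand-in for the type (the `stop` helper is illustrative, not a daemon method):

```go
package main

import "fmt"

// Simplified stand-in for the ErrContainerNotRunning type introduced above.
type ErrContainerNotRunning struct {
	id string
}

func (e ErrContainerNotRunning) Error() string {
	return fmt.Sprintf("Container %s is not running", e.id)
}

// stop is an illustrative helper standing in for Kill/Pause/Resize.
func stop(running bool) error {
	if !running {
		return ErrContainerNotRunning{id: "abc123"}
	}
	return nil
}

func main() {
	err := stop(false)
	// A type assertion replaces brittle string matching on the message.
	if _, ok := err.(ErrContainerNotRunning); ok {
		fmt.Println("container was already stopped:", err)
	}
}
```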
@@ -408,7 +416,7 @@ func (container *Container) Pause() error {
|
||||
|
||||
// We cannot Pause the container which is not running
|
||||
if !container.Running {
|
||||
return fmt.Errorf("Container %s is not running, cannot pause a non-running container", container.ID)
|
||||
return ErrContainerNotRunning{container.ID}
|
||||
}
|
||||
|
||||
// We cannot Pause the container which is already paused
|
||||
@@ -430,7 +438,7 @@ func (container *Container) Unpause() error {
|
||||
|
||||
// We cannot unpause the container which is not running
|
||||
if !container.Running {
|
||||
return fmt.Errorf("Container %s is not running, cannot unpause a non-running container", container.ID)
|
||||
return ErrContainerNotRunning{container.ID}
|
||||
}
|
||||
|
||||
// We cannot unpause the container which is not paused
|
||||
@@ -448,7 +456,7 @@ func (container *Container) Unpause() error {
|
||||
|
||||
func (container *Container) Kill() error {
|
||||
if !container.IsRunning() {
|
||||
return fmt.Errorf("Container %s is not running", container.ID)
|
||||
return ErrContainerNotRunning{container.ID}
|
||||
}
|
||||
|
||||
// 1. Send SIGKILL
|
||||
@@ -530,7 +538,7 @@ func (container *Container) Restart(seconds int) error {
|
||||
|
||||
func (container *Container) Resize(h, w int) error {
|
||||
if !container.IsRunning() {
|
||||
return fmt.Errorf("Cannot resize container %s, container is not running", container.ID)
|
||||
return ErrContainerNotRunning{container.ID}
|
||||
}
|
||||
if err := container.command.ProcessConfig.Terminal.Resize(h, w); err != nil {
|
||||
return err
|
||||
@@ -1080,8 +1088,12 @@ func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error)
|
||||
|
||||
func (container *Container) networkMounts() []execdriver.Mount {
|
||||
var mounts []execdriver.Mount
|
||||
mode := "Z"
|
||||
if container.hostConfig.NetworkMode.IsContainer() {
|
||||
mode = "z"
|
||||
}
|
||||
if container.ResolvConfPath != "" {
|
||||
label.SetFileLabel(container.ResolvConfPath, container.MountLabel)
|
||||
label.Relabel(container.ResolvConfPath, container.MountLabel, mode)
|
||||
mounts = append(mounts, execdriver.Mount{
|
||||
Source: container.ResolvConfPath,
|
||||
Destination: "/etc/resolv.conf",
|
||||
@@ -1090,7 +1102,7 @@ func (container *Container) networkMounts() []execdriver.Mount {
|
||||
})
|
||||
}
|
||||
if container.HostnamePath != "" {
|
||||
label.SetFileLabel(container.HostnamePath, container.MountLabel)
|
||||
label.Relabel(container.HostnamePath, container.MountLabel, mode)
|
||||
mounts = append(mounts, execdriver.Mount{
|
||||
Source: container.HostnamePath,
|
||||
Destination: "/etc/hostname",
|
||||
@@ -1099,7 +1111,7 @@ func (container *Container) networkMounts() []execdriver.Mount {
|
||||
})
|
||||
}
|
||||
if container.HostsPath != "" {
|
||||
label.SetFileLabel(container.HostsPath, container.MountLabel)
|
||||
label.Relabel(container.HostsPath, container.MountLabel, mode)
|
||||
mounts = append(mounts, execdriver.Mount{
|
||||
Source: container.HostsPath,
|
||||
Destination: "/etc/hosts",
|
||||
|
||||
@@ -272,7 +272,11 @@ func populateCommand(c *Container, env []string) error {
|
||||
BlkioWeight: c.hostConfig.BlkioWeight,
|
||||
Rlimits: rlimits,
|
||||
OomKillDisable: c.hostConfig.OomKillDisable,
|
||||
MemorySwappiness: c.hostConfig.MemorySwappiness,
|
||||
MemorySwappiness: -1,
|
||||
}
|
||||
|
||||
if c.hostConfig.MemorySwappiness != nil {
|
||||
resources.MemorySwappiness = *c.hostConfig.MemorySwappiness
|
||||
}
|
||||
|
||||
processConfig := execdriver.ProcessConfig{
|
||||
|
||||
@@ -66,9 +66,6 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
|
||||
if err := daemon.mergeAndVerifyConfig(config, img); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if !config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled {
|
||||
warnings = append(warnings, "IPv4 forwarding is disabled.")
|
||||
}
|
||||
if hostConfig == nil {
|
||||
hostConfig = &runconfig.HostConfig{}
|
||||
}
|
||||
|
||||
@@ -979,7 +979,7 @@ func getDefaultRouteMtu() (int, error) {
|
||||
return 0, err
|
||||
}
|
||||
for _, r := range routes {
|
||||
if r.Default {
|
||||
if r.Default && r.Iface != nil {
|
||||
return r.Iface.MTU, nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -167,13 +167,16 @@ func (daemon *Daemon) verifyContainerSettings(hostConfig *runconfig.HostConfig,
|
||||
if hostConfig.Memory == 0 && hostConfig.MemorySwap > 0 {
|
||||
return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.")
|
||||
}
|
||||
if hostConfig.MemorySwappiness != -1 && !daemon.SystemConfig().MemorySwappiness {
|
||||
if hostConfig.MemorySwappiness != nil && !daemon.SystemConfig().MemorySwappiness {
|
||||
warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.")
|
||||
logrus.Warnf("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.")
|
||||
hostConfig.MemorySwappiness = -1
|
||||
hostConfig.MemorySwappiness = nil
|
||||
}
|
||||
if hostConfig.MemorySwappiness != -1 && (hostConfig.MemorySwappiness < 0 || hostConfig.MemorySwappiness > 100) {
|
||||
return warnings, fmt.Errorf("Invalid value: %d, valid memory swappiness range is 0-100.", hostConfig.MemorySwappiness)
|
||||
if hostConfig.MemorySwappiness != nil {
|
||||
swappiness := *hostConfig.MemorySwappiness
|
||||
if swappiness < -1 || swappiness > 100 {
|
||||
return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100.", swappiness)
|
||||
}
|
||||
}
|
||||
if hostConfig.CpuPeriod > 0 && !daemon.SystemConfig().CpuCfsPeriod {
|
||||
warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.")
|
||||
|
||||
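The swappiness hunks above move `MemorySwappiness` from an integer with a `-1` sentinel to a pointer, so "not set" is distinguishable from an explicit value and validation only runs when a value was supplied. A self-contained sketch of the same nil-pointer-means-unset pattern (the struct and helper names are illustrative, not the daemon's API):

```go
package main

import "fmt"

// Illustrative config struct: a nil pointer means "not specified".
type hostConfig struct {
	MemorySwappiness *int64
}

func validateSwappiness(cfg hostConfig) error {
	if cfg.MemorySwappiness == nil {
		return nil // unset: leave the kernel default alone
	}
	s := *cfg.MemorySwappiness
	if s < -1 || s > 100 {
		return fmt.Errorf("invalid value: %d, valid memory swappiness range is 0-100", s)
	}
	return nil
}

func main() {
	fmt.Println(validateSwappiness(hostConfig{})) // <nil>: nothing set, nothing checked

	v := int64(60)
	fmt.Println(validateSwappiness(hostConfig{MemorySwappiness: &v})) // <nil>: explicit, valid value
}
```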
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/daemon/execdriver"
|
||||
"github.com/docker/docker/daemon/execdriver/lxc"
|
||||
"github.com/docker/docker/daemon/execdriver/native"
|
||||
@@ -18,6 +19,7 @@ func NewDriver(name string, options []string, root, libPath, initPath string, sy
|
||||
// we want to give the lxc driver the full docker root because it needs
|
||||
// to access and write config and template files in /var/lib/docker/containers/*
|
||||
// to be backwards compatible
|
||||
logrus.Warn("LXC built-in support is deprecated.")
|
||||
return lxc.NewDriver(root, libPath, initPath, sysInfo.AppArmor)
|
||||
case "native":
|
||||
return native.NewDriver(path.Join(root, "execdriver", "native"), initPath, options)
|
||||
|
||||
145
daemon/execdriver/native/apparmor.go
Normal file
@@ -0,0 +1,145 @@
|
||||
// +build linux
|
||||
|
||||
package native
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/apparmor"
|
||||
)
|
||||
|
||||
const (
|
||||
apparmorProfilePath = "/etc/apparmor.d/docker"
|
||||
)
|
||||
|
||||
type data struct {
|
||||
Name string
|
||||
Imports []string
|
||||
InnerImports []string
|
||||
}
|
||||
|
||||
const baseTemplate = `
|
||||
{{range $value := .Imports}}
|
||||
{{$value}}
|
||||
{{end}}
|
||||
|
||||
profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
|
||||
{{range $value := .InnerImports}}
|
||||
{{$value}}
|
||||
{{end}}
|
||||
|
||||
network,
|
||||
capability,
|
||||
file,
|
||||
umount,
|
||||
|
||||
deny @{PROC}/sys/fs/** wklx,
|
||||
deny @{PROC}/fs/** wklx,
|
||||
deny @{PROC}/sysrq-trigger rwklx,
|
||||
deny @{PROC}/mem rwklx,
|
||||
deny @{PROC}/kmem rwklx,
|
||||
deny @{PROC}/kcore rwklx,
|
||||
deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx,
|
||||
deny @{PROC}/sys/kernel/*/** wklx,
|
||||
|
||||
deny mount,
|
||||
|
||||
deny /sys/[^f]*/** wklx,
|
||||
deny /sys/f[^s]*/** wklx,
|
||||
deny /sys/fs/[^c]*/** wklx,
|
||||
deny /sys/fs/c[^g]*/** wklx,
|
||||
deny /sys/fs/cg[^r]*/** wklx,
|
||||
deny /sys/firmware/efi/efivars/** rwklx,
|
||||
deny /sys/kernel/security/** rwklx,
|
||||
}
|
||||
`
|
||||
|
||||
func generateProfile(out io.Writer) error {
|
||||
compiled, err := template.New("apparmor_profile").Parse(baseTemplate)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data := &data{
|
||||
Name: "docker-default",
|
||||
}
|
||||
if tunablesExists() {
|
||||
data.Imports = append(data.Imports, "#include <tunables/global>")
|
||||
} else {
|
||||
data.Imports = append(data.Imports, "@{PROC}=/proc/")
|
||||
}
|
||||
if abstractionsExists() {
|
||||
data.InnerImports = append(data.InnerImports, "#include <abstractions/base>")
|
||||
}
|
||||
if err := compiled.Execute(out, data); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// check if the tunables/global exist
|
||||
func tunablesExists() bool {
|
||||
_, err := os.Stat("/etc/apparmor.d/tunables/global")
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// check if abstractions/base exist
|
||||
func abstractionsExists() bool {
|
||||
_, err := os.Stat("/etc/apparmor.d/abstractions/base")
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func installAppArmorProfile() error {
|
||||
if !apparmor.IsEnabled() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Make sure /etc/apparmor.d exists
|
||||
if err := os.MkdirAll(path.Dir(apparmorProfilePath), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(apparmorProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := generateProfile(f); err != nil {
|
||||
f.Close()
|
||||
return err
|
||||
}
|
||||
f.Close()
|
||||
|
||||
cmd := exec.Command("/sbin/apparmor_parser", "-r", "-W", "docker")
|
||||
// to use the parser directly we have to make sure we are in the correct
|
||||
// dir with the profile
|
||||
cmd.Dir = "/etc/apparmor.d"
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error loading docker apparmor profile: %s (%s)", err, output)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func hasAppArmorProfileLoaded(profile string) error {
|
||||
file, err := os.Open("/sys/kernel/security/apparmor/profiles")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r := bufio.NewReader(file)
|
||||
for {
|
||||
p, err := r.ReadString('\n')
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if strings.HasPrefix(p, profile+" ") {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
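The new `apparmor.go` above renders the profile with `text/template` before handing it to `apparmor_parser`. A sketch of exercising a template the same way, writing to an in-memory buffer instead of `/etc/apparmor.d/docker`; the trimmed-down template here is illustrative, not the full Docker profile:

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

type profileData struct {
	Name    string
	Imports []string
}

// A much-reduced template in the spirit of baseTemplate above.
const miniTemplate = `{{range $v := .Imports}}{{$v}}
{{end}}profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
  deny mount,
}
`

func main() {
	tmpl, err := template.New("apparmor_profile").Parse(miniTemplate)
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	data := profileData{
		Name:    "docker-default",
		Imports: []string{"#include <tunables/global>"},
	}
	if err := tmpl.Execute(&buf, data); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
}
```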
@@ -85,7 +85,7 @@ func (d *driver) createContainer(c *execdriver.Command) (*configs.Config, error)
|
||||
}
|
||||
|
||||
/* These paths must be remounted as r/o */
|
||||
container.ReadonlyPaths = append(container.ReadonlyPaths, "/proc", "/dev")
|
||||
container.ReadonlyPaths = append(container.ReadonlyPaths, "/dev")
|
||||
}
|
||||
|
||||
if err := d.setupMounts(container, c); err != nil {
|
||||
@@ -200,7 +200,6 @@ func (d *driver) setPrivileged(container *configs.Config) (err error) {
|
||||
if apparmor.IsEnabled() {
|
||||
container.AppArmorProfile = "unconfined"
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
sysinfo "github.com/docker/docker/pkg/system"
|
||||
"github.com/docker/docker/pkg/term"
|
||||
"github.com/opencontainers/runc/libcontainer"
|
||||
"github.com/opencontainers/runc/libcontainer/apparmor"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
"github.com/opencontainers/runc/libcontainer/system"
|
||||
@@ -51,6 +52,20 @@ func NewDriver(root, initPath string, options []string) (*driver, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if apparmor.IsEnabled() {
|
||||
if err := installAppArmorProfile(); err != nil {
|
||||
apparmorProfiles := []string{"docker-default"}
|
||||
|
||||
// Allow the daemon to run if loading failed, but the profiles are active

|
||||
// (possibly through another run, manually, or via system startup)
|
||||
for _, policy := range apparmorProfiles {
|
||||
if err := hasAppArmorProfileLoaded(policy); err != nil {
|
||||
return nil, fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded.", policy)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// choose cgroup manager
|
||||
// this makes sure there are no breaking changes to people
|
||||
// who upgrade from versions without native.cgroupdriver opt
|
||||
|
||||
@@ -323,7 +323,7 @@ func (a *Driver) Diff(id, parent string) (archive.Archive, error) {
|
||||
}
|
||||
|
||||
func (a *Driver) applyDiff(id string, diff archive.ArchiveReader) error {
|
||||
return chrootarchive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil)
|
||||
return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), nil)
|
||||
}
|
||||
|
||||
// DiffSize calculates the changes between the specified id
|
||||
|
||||
@@ -77,6 +77,7 @@ type Driver interface {
|
||||
// ApplyDiff extracts the changeset from the given diff into the
|
||||
// layer with the specified id and parent, returning the size of the
|
||||
// new layer in bytes.
|
||||
// The archive.ArchiveReader must be an uncompressed stream.
|
||||
ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error)
|
||||
// DiffSize calculates the changes between the specified id
|
||||
// and its parent and returns the size in bytes of the changes
|
||||
|
||||
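Per the interface comment above, graph drivers now receive an already-uncompressed tar stream, and the surrounding hunks switch them to `UntarUncompressed`/`ApplyUncompressedLayer`. Decompression therefore has to happen at the call site; a hedged sketch of what that might look like, assuming the `DecompressStream` helper in this tree's `pkg/archive` and the import paths shown (treat both as assumptions):

```go
package layerutil

import (
	"io"

	"github.com/docker/docker/daemon/graphdriver"
	"github.com/docker/docker/pkg/archive"
)

// applyCompressedDiff decompresses a (possibly gzip/xz-compressed) layer
// stream before handing it to ApplyDiff, which now expects plain tar.
func applyCompressedDiff(d graphdriver.Driver, id, parent string, compressed io.Reader) (int64, error) {
	decompressed, err := archive.DecompressStream(compressed)
	if err != nil {
		return 0, err
	}
	defer decompressed.Close()
	return d.ApplyDiff(id, parent, decompressed)
}
```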
@@ -121,7 +121,7 @@ func (gdw *naiveDiffDriver) ApplyDiff(id, parent string, diff archive.ArchiveRea
|
||||
|
||||
start := time.Now().UTC()
|
||||
logrus.Debugf("Start untar layer")
|
||||
if size, err = chrootarchive.ApplyLayer(layerFs, diff); err != nil {
|
||||
if size, err = chrootarchive.ApplyUncompressedLayer(layerFs, diff); err != nil {
|
||||
return
|
||||
}
|
||||
logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
|
||||
|
||||
@@ -411,7 +411,7 @@ func (d *Driver) ApplyDiff(id string, parent string, diff archive.ArchiveReader)
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if size, err = chrootarchive.ApplyLayer(tmpRootDir, diff); err != nil {
|
||||
if size, err = chrootarchive.ApplyUncompressedLayer(tmpRootDir, diff); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ package daemon
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
)
|
||||
@@ -91,13 +92,13 @@ func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSON
|
||||
Pid: container.State.Pid,
|
||||
ExitCode: container.State.ExitCode,
|
||||
Error: container.State.Error,
|
||||
StartedAt: container.State.StartedAt,
|
||||
FinishedAt: container.State.FinishedAt,
|
||||
StartedAt: container.State.StartedAt.Format(time.RFC3339Nano),
|
||||
FinishedAt: container.State.FinishedAt.Format(time.RFC3339Nano),
|
||||
}
|
||||
|
||||
contJSONBase := &types.ContainerJSONBase{
|
||||
Id: container.ID,
|
||||
Created: container.Created,
|
||||
Created: container.Created.Format(time.RFC3339Nano),
|
||||
Path: container.Path,
|
||||
Args: container.Args,
|
||||
State: containerState,
|
||||
|
||||
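The inspect hunk above serializes `Created`, `StartedAt`, and `FinishedAt` as RFC 3339 strings with nanosecond precision instead of raw `time.Time` values, which pins down the wire format of the API response. A self-contained sketch of the formatting and the round-trip a client would do:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	started := time.Date(2015, 9, 10, 12, 34, 56, 789000000, time.UTC)

	// Format the same way the inspect endpoint now does.
	s := started.Format(time.RFC3339Nano)
	fmt.Println(s) // 2015-09-10T12:34:56.789Z

	// Clients can parse it back without losing precision.
	parsed, err := time.Parse(time.RFC3339Nano, s)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.Equal(started)) // true
}
```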
@@ -1,9 +1,6 @@
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
)
|
||||
import "syscall"
|
||||
|
||||
// ContainerKill send signal to the container
|
||||
// If no signal is given (sig 0), then Kill with SIGKILL and wait
|
||||
@@ -18,12 +15,12 @@ func (daemon *Daemon) ContainerKill(name string, sig uint64) error {
|
||||
// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
|
||||
if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
|
||||
if err := container.Kill(); err != nil {
|
||||
return fmt.Errorf("Cannot kill container %s: %s", name, err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// Otherwise, just send the requested signal
|
||||
if err := container.KillSig(int(sig)); err != nil {
|
||||
return fmt.Errorf("Cannot kill container %s: %s", name, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -93,9 +93,9 @@ func New(ctx logger.Context) (logger.Logger, error) {
|
||||
}
|
||||
logrus.Debugf("logging driver fluentd configured for container:%s, host:%s, port:%d, tag:%s.", ctx.ContainerID, host, port, tag)
|
||||
|
||||
// logger tries to reconnect 2**64 - 1 times
|
||||
// logger tries to reconnect 2**32 - 1 times
|
||||
// failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds]
|
||||
log, err := fluent.New(fluent.Config{FluentPort: port, FluentHost: host, RetryWait: 1000, MaxRetry: math.MaxUint32})
|
||||
log, err := fluent.New(fluent.Config{FluentPort: port, FluentHost: host, RetryWait: 1000, MaxRetry: math.MaxInt32})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
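The change from `math.MaxUint32` to `math.MaxInt32` above is presumably because the fluent client's `MaxRetry` field is a plain `int`, and `math.MaxUint32` does not fit in an `int` on 32-bit platforms. A self-contained sketch of the size difference (constants only, no fluentd dependency):

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

func main() {
	fmt.Println("int size in bits:", strconv.IntSize) // 32 on 32-bit platforms, 64 on 64-bit
	fmt.Println("MaxInt32: ", math.MaxInt32)          // 2147483647  — always representable as int
	fmt.Println("MaxUint32:", uint64(math.MaxUint32)) // 4294967295  — overflows a 32-bit int
}
```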
@@ -259,7 +259,8 @@ func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.R
|
||||
if !config.Follow {
|
||||
return
|
||||
}
|
||||
if config.Tail == 0 {
|
||||
|
||||
if config.Tail >= 0 {
|
||||
latestFile.Seek(0, os.SEEK_END)
|
||||
}
|
||||
|
||||
|
||||
@@ -64,7 +64,12 @@ func NewLogWatcher() *LogWatcher {
|
||||
|
||||
// Close notifies the underlying log reader to stop
|
||||
func (w *LogWatcher) Close() {
|
||||
close(w.closeNotifier)
|
||||
// only close if not already closed
|
||||
select {
|
||||
case <-w.closeNotifier:
|
||||
default:
|
||||
close(w.closeNotifier)
|
||||
}
|
||||
}
|
||||
|
||||
// WatchClose returns a channel receiver that receives notification when the watcher has been closed
|
||||
|
||||
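The `LogWatcher.Close` change above guards the `close` with a `select` so a second `Close` call does not panic on an already-closed channel. A self-contained sketch of the same guard; `sync.Once` would be an alternative when `Close` may be called concurrently from multiple goroutines, whereas this sketch, like the original, addresses repeated calls:

```go
package main

import "fmt"

type watcher struct {
	closeNotifier chan struct{}
}

func newWatcher() *watcher {
	return &watcher{closeNotifier: make(chan struct{})}
}

// Close is safe to call more than once: closing an already-closed
// channel panics, so check whether it is closed first.
func (w *watcher) Close() {
	select {
	case <-w.closeNotifier:
		// already closed
	default:
		close(w.closeNotifier)
	}
}

func main() {
	w := newWatcher()
	w.Close()
	w.Close() // no panic the second time
	fmt.Println("closed twice without panicking")
}
```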
@@ -76,6 +76,7 @@ func NewDaemonCli() *DaemonCli {
|
||||
|
||||
// TODO(tiborvass): remove InstallFlags?
|
||||
daemonConfig := new(daemon.Config)
|
||||
daemonConfig.LogConfig.Config = make(map[string]string)
|
||||
daemonConfig.InstallFlags(daemonFlags, presentInHelp)
|
||||
daemonConfig.InstallFlags(flag.CommandLine, absentFromHelp)
|
||||
registryOptions := new(registry.Options)
|
||||
@@ -100,6 +101,7 @@ func migrateKey() (err error) {
|
||||
err = os.Remove(oldPath)
|
||||
} else {
|
||||
logrus.Warnf("Key migration failed, key file not removed at %s", oldPath)
|
||||
os.Remove(newPath)
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -207,10 +209,6 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
|
||||
}()
|
||||
}
|
||||
|
||||
if cli.LogConfig.Config == nil {
|
||||
cli.LogConfig.Config = make(map[string]string)
|
||||
}
|
||||
|
||||
serverConfig := &apiserver.ServerConfig{
|
||||
Logging: true,
|
||||
EnableCors: cli.EnableCors,
|
||||
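The daemon CLI hunks above move log-config initialization to construction time: `LogConfig.Config` is allocated with `make` when the config is created, and the later nil check is dropped. Allocating before use matters because assigning to an entry of a nil map panics in Go. A self-contained sketch of the failure mode and the fix:

```go
package main

import "fmt"

func main() {
	var cfg map[string]string // nil map: reads are fine, writes panic

	fmt.Println(cfg["max-size"]) // reading a missing key from a nil map just yields ""

	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered:", r) // "assignment to entry in nil map"
		}
		// The fix applied above: allocate the map before anything writes to it.
		cfg = make(map[string]string)
		cfg["max-size"] = "10m"
		fmt.Println(cfg)
	}()

	cfg["max-size"] = "10m" // panics on the nil map
}
```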
@@ -226,7 +224,7 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
|
||||
}
|
||||
tlsConfig, err := tlsconfig.Server(*commonFlags.TLSOptions)
|
||||
if err != nil {
|
||||
logrus.Fatalf("foobar: %v", err)
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
serverConfig.TLSConfig = tlsConfig
|
||||
}
|
||||
|
||||
@@ -28,7 +28,7 @@ func main() {
|
||||
flag.Merge(flag.CommandLine, clientFlags.FlagSet, commonFlags.FlagSet)
|
||||
|
||||
flag.Usage = func() {
|
||||
fmt.Fprint(os.Stdout, "Usage: docker [OPTIONS] COMMAND [arg...]\n"+daemonUsage+" docker [ -h | --help | -v | --version ]\n\n")
|
||||
fmt.Fprint(os.Stdout, "Usage: docker [OPTIONS] COMMAND [arg...]\n"+daemonUsage+" docker [ --help | -v | --version ]\n\n")
|
||||
fmt.Fprint(os.Stdout, "A self-sufficient runtime for containers.\n\nOptions:\n")
|
||||
|
||||
flag.CommandLine.SetOutput(os.Stdout)
|
||||
|
||||
@@ -87,8 +87,8 @@ own.
|
||||
container with this image.
|
||||
|
||||
The container exposes port 8000 on the localhost so that you can connect and
|
||||
see your changes. If you are running Boot2Docker, use the `boot2docker ip`
|
||||
to get the address of your server.
|
||||
see your changes. If you use Docker Machine, the `docker-machine ip
|
||||
<machine-name>` command gives you the address of your server.
|
||||
|
||||
6. Check your writing for style and mechanical errors.
|
||||
|
||||
@@ -158,18 +158,20 @@ update the root docs pages by running
|
||||
|
||||
$ make AWS_S3_BUCKET=dowideit-docs BUILD_ROOT=yes docs-release
|
||||
|
||||
### Errors publishing using Boot2Docker
|
||||
### Errors publishing using a Docker Machine VM
|
||||
|
||||
Sometimes, in a Boot2Docker environment, the publishing procedure returns this
|
||||
Sometimes, in a Windows or Mac environment, the publishing procedure returns this
|
||||
error:
|
||||
|
||||
Post http:///var/run/docker.sock/build?rm=1&t=docker-docs%3Apost-1.2.0-docs_update-2:
|
||||
dial unix /var/run/docker.sock: no such file or directory.
|
||||
|
||||
If this happens, set the Docker host. Run the following command to set the
|
||||
If this happens, set the Docker host. Run the following command to get the
|
||||
variables in your shell:
|
||||
|
||||
$ eval "$(boot2docker shellinit)"
|
||||
docker-machine env <machine-name>
|
||||
|
||||
Then, set your environment accordingly.
|
||||
|
||||
## Cherry-picking documentation changes to update an existing release.
|
||||
|
||||
|
||||
@@ -47,10 +47,6 @@ image cache.
|
||||
> characters of the full image ID - which can be found using
|
||||
> `docker inspect` or `docker images --no-trunc=true`.
|
||||
|
||||
> **Note:** if you are using a remote Docker daemon, such as Boot2Docker,
|
||||
> then _do not_ type the `sudo` before the `docker` commands shown in the
|
||||
> documentation's examples.
|
||||
|
||||
## Running an interactive shell
|
||||
|
||||
To run an interactive shell in the Ubuntu image:
|
||||
|
||||
@@ -11,111 +11,7 @@ weight = 7
|
||||
|
||||
# Using certificates for repository client verification
|
||||
|
||||
In [Running Docker with HTTPS](/articles/https), you learned that, by default,
|
||||
Docker runs via a non-networked Unix socket and TLS must be enabled in order
|
||||
to have the Docker client and the daemon communicate securely over HTTPS.
|
||||
|
||||
Now, you will see how to allow the Docker registry (i.e., *a server*) to
|
||||
verify that the Docker daemon (i.e., *a client*) has the right to access the
|
||||
images being hosted with *certificate-based client-server authentication*.
|
||||
|
||||
We will show you how to install a Certificate Authority (CA) root certificate
|
||||
for the registry and how to set the client TLS certificate for verification.
|
||||
|
||||
## Understanding the configuration
|
||||
|
||||
A custom certificate is configured by creating a directory under
|
||||
`/etc/docker/certs.d` using the same name as the registry's hostname (e.g.,
|
||||
`localhost`). All `*.crt` files are added to this directory as CA roots.
|
||||
|
||||
> **Note:**
|
||||
> In the absence of any root certificate authorities, Docker
|
||||
> will use the system default (i.e., host's root CA set).
|
||||
|
||||
The presence of one or more `<filename>.key/cert` pairs indicates to Docker
|
||||
that there are custom certificates required for access to the desired
|
||||
repository.
|
||||
|
||||
> **Note:**
|
||||
> If there are multiple certificates, each will be tried in alphabetical
|
||||
> order. If there is an authentication error (e.g., 403, 404, 5xx, etc.), Docker
|
||||
> will continue to try with the next certificate.
|
||||
|
||||
Our example is set up like this:
|
||||
|
||||
/etc/docker/certs.d/ <-- Certificate directory
|
||||
└── localhost <-- Hostname
|
||||
├── client.cert <-- Client certificate
|
||||
├── client.key <-- Client key
|
||||
└── localhost.crt <-- Registry certificate
|
||||
|
||||
## Creating the client certificates
|
||||
|
||||
You will use OpenSSL's `genrsa` and `req` commands to first generate an RSA
|
||||
key and then use the key to create the certificate.
|
||||
|
||||
$ openssl genrsa -out client.key 4096
|
||||
$ openssl req -new -x509 -text -key client.key -out client.cert
|
||||
|
||||
> **Warning:**
|
||||
> Using TLS and managing a CA is an advanced topic.
|
||||
> You should be familiar with OpenSSL, x509, and TLS before
|
||||
> attempting to use them in production.
|
||||
|
||||
> **Warning:**
|
||||
> These TLS commands will only generate a working set of certificates on Linux.
|
||||
> The version of OpenSSL in Mac OS X is incompatible with the type of
|
||||
> certificate Docker requires.
|
||||
|
||||
## Testing the verification setup
|
||||
|
||||
You can test this setup by using Apache to host a Docker registry.
|
||||
For this purpose, you can copy a registry tree (containing images) inside
|
||||
the Apache root.
|
||||
|
||||
> **Note:**
|
||||
> You can find such an example [here](
|
||||
> http://people.gnome.org/~alexl/v1.tar.gz) - which contains the busybox image.
|
||||
|
||||
Once you set up the registry, you can use the following Apache configuration
|
||||
to implement certificate-based protection.
|
||||
|
||||
# This must be in the root context, otherwise it causes a re-negotiation
|
||||
# which is not supported by the TLS implementation in go
|
||||
SSLVerifyClient optional_no_ca
|
||||
|
||||
<Location /v1>
|
||||
Action cert-protected /cgi-bin/cert.cgi
|
||||
SetHandler cert-protected
|
||||
|
||||
Header set x-docker-registry-version "0.6.2"
|
||||
SetEnvIf Host (.*) custom_host=$1
|
||||
Header set X-Docker-Endpoints "%{custom_host}e"
|
||||
</Location>
|
||||
|
||||
Save the above content as `/etc/httpd/conf.d/registry.conf`, and
|
||||
continue with creating a `cert.cgi` file under `/var/www/cgi-bin/`.
|
||||
|
||||
#!/bin/bash
|
||||
if [ "$HTTPS" != "on" ]; then
|
||||
echo "Status: 403 Not using SSL"
|
||||
echo "x-docker-registry-version: 0.6.2"
|
||||
echo
|
||||
exit 0
|
||||
fi
|
||||
if [ "$SSL_CLIENT_VERIFY" == "NONE" ]; then
|
||||
echo "Status: 403 Client certificate invalid"
|
||||
echo "x-docker-registry-version: 0.6.2"
|
||||
echo
|
||||
exit 0
|
||||
fi
|
||||
echo "Content-length: $(stat --printf='%s' $PATH_TRANSLATED)"
|
||||
echo "x-docker-registry-version: 0.6.2"
|
||||
echo "X-Docker-Endpoints: $SERVER_NAME"
|
||||
echo "X-Docker-Size: 0"
|
||||
echo
|
||||
|
||||
cat $PATH_TRANSLATED
|
||||
|
||||
This CGI script will ensure that all requests to `/v1` *without* a valid
|
||||
certificate will be returned with a `403` (i.e., HTTP forbidden) error.
|
||||
The original content was deprecated. For information about configuring
|
||||
certificates, see [deploying a registry
|
||||
server](http://docs.docker.com/registry/deploying/). To reach an older version
|
||||
of this content, refer to an older version of the documentation.
|
||||
|
||||
@@ -12,7 +12,7 @@ weight = 99
|
||||
# Automatically start containers
|
||||
|
||||
As of Docker 1.2,
|
||||
[restart policies](/reference/commandline/cli/#restart-policies) are the
|
||||
[restart policies](/reference/run/#restart-policies-restart) are the
|
||||
built-in Docker mechanism for restarting containers when they exit. If set,
|
||||
restart policies will be used when the Docker daemon starts up, as typically
|
||||
happens after a system boot. Restart policies will ensure that linked containers
|
||||
|
||||
@@ -58,7 +58,7 @@ First generate CA private and public keys:
|
||||
State or Province Name (full name) [Some-State]:Queensland
|
||||
Locality Name (eg, city) []:Brisbane
|
||||
Organization Name (eg, company) [Internet Widgits Pty Ltd]:Docker Inc
|
||||
Organizational Unit Name (eg, section) []:Boot2Docker
|
||||
Organizational Unit Name (eg, section) []:Sales
|
||||
Common Name (e.g. server FQDN or YOUR name) []:$HOST
|
||||
Email Address []:Sven@home.org.au
|
||||
|
||||
|
||||
@@ -11,81 +11,8 @@ weight = 8
|
||||
|
||||
# Run a local registry mirror
|
||||
|
||||
## Why?
|
||||
|
||||
If you have multiple instances of Docker running in your environment
|
||||
(e.g., multiple physical or virtual machines, all running the Docker
|
||||
daemon), each time one of them requires an image that it doesn't have
|
||||
it will go out to the internet and fetch it from the public Docker
|
||||
registry. By running a local registry mirror, you can keep most of the
|
||||
image fetch traffic on your local network.
|
||||
|
||||
## How does it work?
|
||||
|
||||
The first time you request an image from your local registry mirror,
|
||||
it pulls the image from the public Docker registry and stores it locally
|
||||
before handing it back to you. On subsequent requests, the local registry
|
||||
mirror is able to serve the image from its own storage.
|
||||
|
||||
## How do I set up a local registry mirror?
|
||||
|
||||
There are two steps to set up and use a local registry mirror.
|
||||
|
||||
### Step 1: Configure your Docker daemons to use the local registry mirror
|
||||
|
||||
You will need to pass the `--registry-mirror` option to your Docker daemon on
|
||||
startup:
|
||||
|
||||
docker daemon --registry-mirror=http://<my-docker-mirror-host>
|
||||
|
||||
For example, if your mirror is serving on `http://10.0.0.2:5000`, you would run:
|
||||
|
||||
docker daemon --registry-mirror=http://10.0.0.2:5000
|
||||
|
||||
**NOTE:**
|
||||
Depending on your local host setup, you may be able to add the
|
||||
`--registry-mirror` options to the `DOCKER_OPTS` variable in
|
||||
`/etc/default/docker`.
|
||||
|
||||
### Step 2: Run the local registry mirror
|
||||
|
||||
You will need to start a local registry mirror service. The
|
||||
[`registry` image](https://registry.hub.docker.com/_/registry/) provides this
|
||||
functionality. For example, to run a local registry mirror that serves on
|
||||
port `5000` and mirrors the content at `registry-1.docker.io`:
|
||||
|
||||
docker run -p 5000:5000 \
|
||||
-e STANDALONE=false \
|
||||
-e MIRROR_SOURCE=https://registry-1.docker.io \
|
||||
-e MIRROR_SOURCE_INDEX=https://index.docker.io \
|
||||
registry
|
||||
|
||||
## Test it out
|
||||
|
||||
With your mirror running, pull an image that you haven't pulled before (using
|
||||
`time` to time it):
|
||||
|
||||
$ time docker pull node:latest
|
||||
Pulling repository node
|
||||
[...]
|
||||
|
||||
real 1m14.078s
|
||||
user 0m0.176s
|
||||
sys 0m0.120s
|
||||
|
||||
Now, remove the image from your local machine:
|
||||
|
||||
$ docker rmi node:latest
|
||||
|
||||
Finally, re-pull the image:
|
||||
|
||||
$ time docker pull node:latest
|
||||
Pulling repository node
|
||||
[...]
|
||||
|
||||
real 0m51.376s
|
||||
user 0m0.120s
|
||||
sys 0m0.116s
|
||||
|
||||
The second time around, the local registry mirror served the image from storage,
|
||||
avoiding a trip out to the internet to refetch it.
|
||||
The original content was deprecated. [An archived
|
||||
version](https://docs.docker.com/v1.6/articles/registry_mirror) is available in
|
||||
the 1.7 documentation. For information about configuring mirrors with the latest
|
||||
Docker Registry version, please file a support request with [the Distribution
|
||||
project](https://github.com/docker/distribution/issues).
|
||||
|
||||
@@ -33,17 +33,33 @@ If you want Docker to start at boot, you should also:
|
||||
There are a number of ways to configure the daemon flags and environment variables
|
||||
for your Docker daemon.
|
||||
|
||||
If the `docker.service` file is set to use an `EnvironmentFile`
|
||||
(often pointing to `/etc/sysconfig/docker`) then you can modify the
|
||||
referenced file.
|
||||
The recommended way is to use a systemd drop-in file. These are local files in
|
||||
the `/etc/systemd/system/docker.service.d` directory. This could also be
|
||||
`/etc/systemd/system/docker.service`, which also works for overriding the
|
||||
defaults from `/lib/systemd/system/docker.service`.
|
||||
|
||||
Check if the `docker.service` uses an `EnvironmentFile`:
|
||||
However, if you had previously used a package which had an `EnvironmentFile`
|
||||
(often pointing to `/etc/sysconfig/docker`) then for backwards compatibility,
|
||||
you can drop a file in the `/etc/systemd/system/docker.service.d`
|
||||
directory containing the following:
|
||||
|
||||
[Service]
|
||||
EnvironmentFile=-/etc/sysconfig/docker
|
||||
EnvironmentFile=-/etc/sysconfig/docker-storage
|
||||
EnvironmentFile=-/etc/sysconfig/docker-network
|
||||
ExecStart=
|
||||
ExecStart=/usr/bin/docker -d -H fd:// $OPTIONS \
|
||||
$DOCKER_STORAGE_OPTIONS \
|
||||
$DOCKER_NETWORK_OPTIONS \
|
||||
$BLOCK_REGISTRY \
|
||||
$INSECURE_REGISTRY
|
||||
|
||||
To check if the `docker.service` uses an `EnvironmentFile`:
|
||||
|
||||
$ sudo systemctl show docker | grep EnvironmentFile
|
||||
EnvironmentFile=-/etc/sysconfig/docker (ignore_errors=yes)
|
||||
|
||||
Alternatively, find out where the service file is located, and look for the
|
||||
property:
|
||||
Alternatively, find out where the service file is located:
|
||||
|
||||
$ sudo systemctl status docker | grep Loaded
|
||||
Loaded: loaded (/usr/lib/systemd/system/docker.service; enabled)
|
||||
@@ -69,18 +85,20 @@ In this example, we'll assume that your `docker.service` file looks something li
|
||||
|
||||
[Service]
|
||||
Type=notify
|
||||
EnvironmentFile=-/etc/sysconfig/docker
|
||||
ExecStart=/usr/bin/docker daemon -H fd:// $OPTIONS
|
||||
ExecStart=/usr/bin/docker daemon -H fd://
|
||||
LimitNOFILE=1048576
|
||||
LimitNPROC=1048576
|
||||
|
||||
[Install]
|
||||
Also=docker.socket
|
||||
|
||||
This will allow us to add extra flags to the `/etc/sysconfig/docker` file by
|
||||
setting `OPTIONS`:
|
||||
This will allow us to add extra flags via a drop-in file (mentioned above) by
|
||||
placing a file containing the following in the `/etc/systemd/system/docker.service.d`
|
||||
directory:
|
||||
|
||||
OPTIONS="--graph /mnt/docker-data --storage-driver btrfs"
|
||||
[Service]
|
||||
ExecStart=
|
||||
ExecStart=/usr/bin/docker daemon -H fd:// --graph /mnt/docker-data --storage-driver btrfs
|
||||
|
||||
You can also set other environment variables in this file, for example, the
|
||||
`HTTP_PROXY` environment variables described below.
|
||||
|
||||
@@ -31,6 +31,12 @@ Follow the instructions in the plugin's documentation.
|
||||
|
||||
The following plugins exist:
|
||||
|
||||
* The [Blockbridge plugin](https://github.com/blockbridge/blockbridge-docker-volume)
|
||||
is a volume plugin that provides access to an extensible set of
|
||||
container-based persistent storage options. It supports single and multi-host Docker
|
||||
environments with features that include tenant isolation, automated
|
||||
provisioning, encryption, secure deletion, snapshots and QoS.
|
||||
|
||||
* The [Flocker plugin](https://clusterhq.com/docker-plugin/) is a volume plugin
|
||||
which provides multi-host portable volumes for Docker, enabling you to run
|
||||
databases and other stateful containers and move them around across a cluster
|
||||
|
||||
@@ -64,8 +64,7 @@ a container. To exit the container type `exit`.
|
||||
If you want your containers to be able to access the external network you must
|
||||
enable the `net.ipv4.ip_forward` rule.
|
||||
This can be done using YaST by browsing to the
|
||||
`Network Devices -> Network Settings -> Routing` menu and ensuring that the
|
||||
`Enable IPv4 Forwarding` box is checked.
|
||||
`System -> Network Settings -> Routing` menu (for openSUSE Tumbleweed and later) or `Network Devices -> Network Settings -> Routing` menu (for SUSE Linux Enterprise 12 and previous openSUSE versions) and ensuring that the `Enable IPv4 Forwarding` box is checked.
|
||||
|
||||
This option cannot be changed when networking is handled by the Network Manager.
|
||||
In such cases the `/etc/sysconfig/SuSEfirewall2` file needs to be edited by
|
||||
|
||||
@@ -96,7 +96,7 @@ which is officially supported by Docker.
|
||||
>command fails for the Docker repo during installation. To work around this,
|
||||
>add the key directly using the following:
|
||||
>
|
||||
> $ wget -qO- https://get.docker.com/gpg | sudo apt-key add -
|
||||
> $ curl -sSL https://get.docker.com/gpg | sudo apt-key add -
|
||||
|
||||
### Uninstallation
|
||||
|
||||
|
||||
@@ -206,6 +206,24 @@ If you need to add an HTTP Proxy, set a different directory or partition for the
|
||||
Docker runtime files, or make other customizations, read our Systemd article to
|
||||
learn how to [customize your Systemd Docker daemon options](/articles/systemd/).
|
||||
|
||||
## Running Docker with a manually-defined network
|
||||
|
||||
If you manually configure your network using `systemd-network` with `systemd` version 219 or higher, containers you start with Docker may be unable to access your network.
|
||||
Beginning with version 220, the forwarding setting for a given network (`net.ipv4.conf.<interface>.forwarding`) defaults to *off*. This setting prevents IP forwarding. It also conflicts with Docker which enables the `net.ipv4.conf.all.forwarding` setting within a container.
|
||||
|
||||
To work around this, edit the `<interface>.network` file in
|
||||
`/usr/lib/systemd/network/` on your Docker host (ex: `/usr/lib/systemd/network/80-container-host0.network`) and add the following block:
|
||||
|
||||
```
|
||||
[Network]
|
||||
...
|
||||
IPForward=kernel
|
||||
# OR
|
||||
IPForward=true
|
||||
...
|
||||
```
|
||||
|
||||
This configuration allows IP forwarding from the container as expected.
|
||||
|
||||
## Uninstall
|
||||
|
||||
|
||||
@@ -10,37 +10,34 @@ parent = "smn_engine"
|
||||
|
||||
# Mac OS X
|
||||
|
||||
You can install Docker using Boot2Docker to run `docker` commands at your command-line.
|
||||
Choose this installation if you are familiar with the command-line or plan to
|
||||
contribute to the Docker project on GitHub.
|
||||
> **Note**: This release of Docker deprecates the Boot2Docker command line in
|
||||
> favor of Docker Machine. Use the Docker Toolbox to install Docker Machine as
|
||||
> well as the other Docker tools.
|
||||
|
||||
[<img src="/installation/images/kitematic.png" alt="Download Kitematic"
|
||||
style="float:right;">](https://kitematic.com/download)
|
||||
You install Docker using Docker Toolbox. Docker Toolbox includes the following Docker tools:
|
||||
|
||||
Alternatively, you may want to try <a id="inlinelink" href="https://kitematic.com/"
|
||||
target="_blank">Kitematic</a>, an application that lets you set up Docker and
|
||||
run containers using a graphical user interface (GUI).
|
||||
|
||||
## Command-line Docker with Boot2Docker
|
||||
* Docker Machine for running the `docker-machine` binary
|
||||
* Docker Engine for running the `docker` binary
|
||||
* Docker Compose for running the `docker-compose` binary
|
||||
* Kitematic, the Docker GUI
|
||||
* a shell preconfigured for a Docker command-line environment
|
||||
* Oracle VM VirtualBox
|
||||
|
||||
Because the Docker daemon uses Linux-specific kernel features, you can't run
|
||||
Docker natively in OS X. Instead, you must install the Boot2Docker application.
|
||||
The application includes a VirtualBox Virtual Machine (VM), Docker itself, and the
|
||||
Boot2Docker management tool.
|
||||
|
||||
The Boot2Docker management tool is a lightweight Linux virtual machine made
|
||||
specifically to run the Docker daemon on Mac OS X. The VirtualBox VM runs
|
||||
completely from RAM, is a small ~24MB download, and boots in approximately 5s.
|
||||
Docker natively in OS X. Instead, you must use `docker-machine` to create and
|
||||
attach to a virtual machine (VM). This machine is a Linux VM that hosts Docker
|
||||
for you on your Mac.
|
||||
|
||||
**Requirements**
|
||||
|
||||
Your Mac must be running OS X 10.6 "Snow Leopard" or newer to run Boot2Docker.
|
||||
Your Mac must be running OS X 10.8 "Mountain Lion" or newer to install the
|
||||
Docker Toolbox.
|
||||
|
||||
### Learn the key concepts before installing
|
||||
|
||||
In a Docker installation on Linux, your machine is both the localhost and the
|
||||
Docker host. In networking, localhost means your computer. The Docker host is
|
||||
the machine on which the containers run.
|
||||
In a Docker installation on Linux, your physical machine is both the localhost
|
||||
and the Docker host. In networking, localhost means your computer. The Docker
|
||||
host is the computer on which the containers run.
|
||||
|
||||
On a typical Linux installation, the Docker client, the Docker daemon, and any
|
||||
containers run directly on your localhost. This means you can address ports on a
|
||||
@@ -49,135 +46,243 @@ Docker container using standard localhost addressing such as `localhost:8000` or
|
||||
|
||||

|
||||
|
||||
In an OS X installation, the `docker` daemon is running inside a Linux virtual
|
||||
machine provided by Boot2Docker.
|
||||
In an OS X installation, the `docker` daemon is running inside a Linux VM called
|
||||
`default`. The `default` is a lightweight Linux VM made specifically to run
|
||||
the Docker daemon on Mac OS X. The VM runs completely from RAM, is a small ~24MB
|
||||
download, and boots in approximately 5s.
|
||||
|
||||

|
||||
|
||||
In OS X, the Docker host address is the address of the Linux VM.
|
||||
When you start the `boot2docker` process, the VM is assigned an IP address. Under
|
||||
`boot2docker` ports on a container map to ports on the VM. To see this in
|
||||
In OS X, the Docker host address is the address of the Linux VM. When you start
|
||||
the VM with `docker-machine` it is assigned an IP address. When you start a
|
||||
container, the ports on a container map to ports on the VM. To see this in
|
||||
practice, work through the exercises on this page.
|
||||
|
||||
|
||||
### Installation
|
||||
|
||||
1. Go to the [boot2docker/osx-installer ](
|
||||
https://github.com/boot2docker/osx-installer/releases/latest) release page.
|
||||
If you have VirtualBox running, you must shut it down before running the
|
||||
installer.
|
||||
|
||||
4. Download Boot2Docker by clicking `Boot2Docker-x.x.x.pkg` in the "Downloads"
|
||||
section.
|
||||
1. Go to the [Docker Toolbox](https://www.docker.com/toolbox) page.
|
||||
|
||||
3. Install Boot2Docker by double-clicking the package.
|
||||
2. Click the installer link to download.
|
||||
|
||||
The installer places Boot2Docker and VirtualBox in your "Applications" folder.
|
||||
3. Install Docker Toolbox by double-clicking the package or by right-clicking
|
||||
and choosing "Open" from the pop-up menu.
|
||||
|
||||
The installation places the `docker` and `boot2docker` binaries in your
|
||||
`/usr/local/bin` directory.
|
||||
The installer launches the "Install Docker Toolbox" dialog.
|
||||
|
||||

|
||||
|
||||
4. Press "Continue" to install the toolbox.
|
||||
|
||||
The installer presents you with options to customize the standard
|
||||
installation.
|
||||
|
||||

|
||||
|
||||
By default, the standard Docker Toolbox installation:
|
||||
|
||||
* installs binaries for the Docker tools in `/usr/local/bin`
|
||||
* makes these binaries available to all users
|
||||
* updates any existing VirtualBox installation
|
||||
|
||||
Change these defaults by pressing "Customize" or "Change
|
||||
Install Location."
|
||||
|
||||
5. Press "Install" to perform the standard installation.
|
||||
|
||||
The system prompts you for your password.
|
||||
|
||||

|
||||
|
||||
6. Provide your password to continue with the installation.
|
||||
|
||||
When it completes, the installer provides you with some information you can
|
||||
use to complete some common tasks.
|
||||
|
||||

|
||||
|
||||
7. Press "Close" to exit.
|
||||
|
||||
|
||||
## Start the Boot2Docker Application
|
||||
## Running a Docker Container
|
||||
|
||||
To run a Docker container, you first start the `boot2docker` VM and then issue
|
||||
`docker` commands to create, load, and manage containers. You can launch
|
||||
`boot2docker` from your Applications folder or from the command line.
|
||||
To run a Docker container, you:
|
||||
|
||||
> **NOTE**: Boot2Docker is designed as a development tool. You should not use
|
||||
> it in production environments.
|
||||
* create a new (or start an existing) Docker virtual machine
|
||||
* switch your environment to your new VM
|
||||
* use the `docker` client to create, load, and manage containers
|
||||
|
||||
### From the Applications folder
|
||||
Once you create a machine, you can reuse it as often as you like. Like any
|
||||
VirtualBox VM, it maintains its configuration between uses.
|
||||
|
||||
When you launch the "Boot2Docker" application from your "Applications" folder, the
|
||||
application:
|
||||
There are two ways to use the installed tools, from the Docker Quickstart Terminal or
|
||||
[from your shell](#from-your-shell).
|
||||
|
||||
* opens a terminal window
|
||||
### From the Docker Quickstart Terminal
|
||||
|
||||
* creates a $HOME/.boot2docker directory
|
||||
1. Open the "Applications" folder or the "Launchpad".
|
||||
|
||||
* creates a VirtualBox ISO and certs
|
||||
2. Find the Docker Quickstart Terminal and double-click to launch it.
|
||||
|
||||
* starts a VirtualBox VM running the `docker` daemon
|
||||
The application:
|
||||
|
||||
Once the launch completes, you can run `docker` commands. A good way to verify
|
||||
your setup succeeded is to run the `hello-world` container.
|
||||
* opens a terminal window
|
||||
* creates a VM called `default` if it doesn't exist, starts the VM if it does
|
||||
* points the terminal environment to this VM
|
||||
|
||||
$ docker run hello-world
|
||||
Unable to find image 'hello-world:latest' locally
|
||||
511136ea3c5a: Pull complete
|
||||
31cbccb51277: Pull complete
|
||||
e45a5af57b00: Pull complete
|
||||
hello-world:latest: The image you are pulling has been verified.
|
||||
Important: image verification is a tech preview feature and should not be
|
||||
relied on to provide security.
|
||||
Status: Downloaded newer image for hello-world:latest
|
||||
Hello from Docker.
|
||||
This message shows that your installation appears to be working correctly.
|
||||
Once the launch completes, the Docker Quickstart Terminal reports:
|
||||
|
||||
To generate this message, Docker took the following steps:
|
||||
1. The Docker client contacted the Docker daemon.
|
||||
2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
|
||||
(Assuming it was not already locally available.)
|
||||
3. The Docker daemon created a new container from that image which runs the
|
||||
executable that produces the output you are currently reading.
|
||||
4. The Docker daemon streamed that output to the Docker client, which sent it
|
||||
to your terminal.
|
||||

|
||||
|
||||
To try something more ambitious, you can run an Ubuntu container with:
|
||||
$ docker run -it ubuntu bash
|
||||
Now, you can run `docker` commands.
|
||||
|
||||
For more examples and ideas, visit:
|
||||
http://docs.docker.com/userguide/
|
||||
3. Verify your setup succeeded by running the `hello-world` container.
|
||||
|
||||
$ docker run hello-world
|
||||
Unable to find image 'hello-world:latest' locally
|
||||
511136ea3c5a: Pull complete
|
||||
31cbccb51277: Pull complete
|
||||
e45a5af57b00: Pull complete
|
||||
hello-world:latest: The image you are pulling has been verified.
|
||||
Important: image verification is a tech preview feature and should not be
|
||||
relied on to provide security.
|
||||
Status: Downloaded newer image for hello-world:latest
|
||||
Hello from Docker.
|
||||
This message shows that your installation appears to be working correctly.
|
||||
|
||||
To generate this message, Docker took the following steps:
|
||||
1. The Docker client contacted the Docker daemon.
|
||||
2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
|
||||
(Assuming it was not already locally available.)
|
||||
3. The Docker daemon created a new container from that image which runs the
|
||||
executable that produces the output you are currently reading.
|
||||
4. The Docker daemon streamed that output to the Docker client, which sent it
|
||||
to your terminal.
|
||||
|
||||
To try something more ambitious, you can run an Ubuntu container with:
|
||||
$ docker run -it ubuntu bash
|
||||
|
||||
For more examples and ideas, visit:
|
||||
http://docs.docker.com/userguide/
|
||||
|
||||
|
||||
A more typical way to start and stop `boot2docker` is using the command line.
|
||||
A more typical way to interact with the Docker tools is from your regular shell command line.
|
||||
|
||||
### From your command line
|
||||
### From your shell
|
||||
|
||||
Initialize and run `boot2docker` from the command line, do the following:
|
||||
This section assumes you are running a Bash shell. You may be running a
|
||||
different shell such as C Shell but the commands are the same.
|
||||
|
||||
1. Create a new Boot2Docker VM.
|
||||
1. Create a new Docker VM.
|
||||
|
||||
$ boot2docker init
|
||||
$ docker-machine create --driver virtualbox default
|
||||
Creating VirtualBox VM...
|
||||
Creating SSH key...
|
||||
Starting VirtualBox VM...
|
||||
Starting VM...
|
||||
To see how to connect Docker to this machine, run: docker-machine env default
|
||||
|
||||
This creates a new virtual machine. You only need to run this command once.
|
||||
This creates a new `default` in VirtualBox.
|
||||
|
||||
2. Start the `boot2docker` VM.
|
||||

|
||||
|
||||
$ boot2docker start
|
||||
The command also creates a machine configuration in the
|
||||
`~/.docker/machine/machines/default` directory. You only need to run the
|
||||
`create` command once. Then, you can use `docker-machine` to start, stop,
|
||||
query, and otherwise manage the VM from the command line.
|
||||
|
||||
3. Display the environment variables for the Docker client.
|
||||
2. List your available machines.
|
||||
|
||||
$ boot2docker shellinit
|
||||
Writing /Users/mary/.boot2docker/certs/boot2docker-vm/ca.pem
|
||||
Writing /Users/mary/.boot2docker/certs/boot2docker-vm/cert.pem
|
||||
Writing /Users/mary/.boot2docker/certs/boot2docker-vm/key.pem
|
||||
export DOCKER_HOST=tcp://192.168.59.103:2376
|
||||
export DOCKER_CERT_PATH=/Users/mary/.boot2docker/certs/boot2docker-vm
|
||||
export DOCKER_TLS_VERIFY=1
|
||||
$ docker-machine ls
|
||||
NAME ACTIVE DRIVER STATE URL SWARM
|
||||
default * virtualbox Running tcp://192.168.99.101:2376
|
||||
|
||||
The specific paths and address on your machine will be different.
|
||||
If you have previously installed the deprecated Boot2Docker application or
|
||||
run the Docker Quickstart Terminal, you may have a `dev` VM as well. When you
|
||||
created `default`, the `docker-machine` command provided instructions
|
||||
for connecting to the VM.
|
||||
|
||||
4. To set the environment variables in your shell do the following:
|
||||
3. Get the environment commands for your new VM.
|
||||
|
||||
$ eval "$(boot2docker shellinit)"
|
||||
$ docker-machine env default
|
||||
export DOCKER_TLS_VERIFY="1"
|
||||
export DOCKER_HOST="tcp://192.168.99.101:2376"
|
||||
export DOCKER_CERT_PATH="/Users/mary/.docker/machine/machines/default"
|
||||
export DOCKER_MACHINE_NAME="default"
|
||||
# Run this command to configure your shell:
|
||||
# eval "$(docker-machine env default)"
|
||||
|
||||
You can also set them manually by using the `export` commands `boot2docker`
|
||||
returns.
|
||||
4. Connect your shell to the `default` machine.
|
||||
|
||||
$ eval "$(docker-machine env default)"
|
||||
|
||||
5. Run the `hello-world` container to verify your setup.
|
||||
|
||||
$ docker run hello-world
|
||||
|
||||
|
||||
## Basic Boot2Docker exercises
|
||||
## Learn about your Toolbox installation
|
||||
|
||||
At this point, you should have `boot2docker` running and the `docker` client
|
||||
environment initialized. To verify this, run the following commands:
|
||||
Toolbox installs the Docker Engine binary, the `docker` binary, on your system. When you
use the Docker Quickstart Terminal or create a `default` VM manually, Docker
Machine creates the `~/.docker/machine/machines/default` folder on your
system. This folder contains the configuration for the VM.
|
||||
|
||||
$ boot2docker status
|
||||
$ docker version
|
||||
You can create multiple VMs on your system with Docker Machine. So, you may have
|
||||
more than one VM folder if you have more than one VM. To remove a VM, use the
|
||||
`docker-machine rm <machine-name>` command.
|
||||
|
||||
Work through this section to try some practical container tasks using `boot2docker` VM.
|
||||
## Migrate from Boot2Docker
|
||||
|
||||
If you were using Boot2Docker previously, you have a pre-existing Docker
|
||||
`boot2docker-vm` VM on your local system. To allow Docker Machine to manage
|
||||
this older VM, you can migrate it.
|
||||
|
||||
1. Open a terminal or the Docker CLI on your system.
|
||||
|
||||
2. Type the following command.
|
||||
|
||||
$ docker-machine create -d virtualbox --virtualbox-import-boot2docker-vm boot2docker-vm docker-vm
|
||||
|
||||
3. Use the `docker-machine` command to interact with the migrated VM.
|
||||
|
||||
The `docker-machine` subcommands are slightly different than the `boot2docker`
|
||||
subcommands. The table below lists the equivalent `docker-machine` subcommand
|
||||
and what it does:
|
||||
|
||||
| `boot2docker` | `docker-machine` | `docker-machine` description |
|
||||
|----------------|------------------|----------------------------------------------------------|
|
||||
| init | create | Creates a new docker host. |
|
||||
| up | start | Starts a stopped machine. |
|
||||
| ssh | ssh | Runs a command or interactive ssh session on the machine.|
|
||||
| save | - | Not applicable. |
|
||||
| down | stop | Stops a running machine. |
|
||||
| poweroff | stop | Stops a running machine. |
|
||||
| reset | restart | Restarts a running machine. |
|
||||
| config | inspect | Prints machine configuration details. |
|
||||
| status | ls | Lists all machines and their status. |
|
||||
| info | inspect | Displays a machine's details. |
|
||||
| ip | ip | Displays the machine's ip address. |
|
||||
| shellinit | env | Displays shell commands needed to configure your shell to interact with a machine |
|
||||
| delete | rm | Removes a machine. |
|
||||
| download | - | Not applicable. |
|
||||
| upgrade | upgrade | Upgrades a machine's Docker client to the latest stable release. |
|
||||
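After migrating, you can confirm that the imported VM works with a few commands; `docker-vm` is the name given in the `create` example above:

    $ docker-machine ls
    $ docker-machine env docker-vm
    $ eval "$(docker-machine env docker-vm)"
    $ docker ps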
|
||||
|
||||
## Example of Docker on Mac OS X
|
||||
|
||||
Work through this section to try some practical container tasks on a VM. At this
|
||||
point, you should have a VM running and be connected to it through your shell.
|
||||
To verify this, run the following commands:
|
||||
|
||||
$ docker-machine ls
|
||||
NAME ACTIVE DRIVER STATE URL SWARM
|
||||
dev * virtualbox Running tcp://192.168.99.100:2376
|
||||
|
||||
The `ACTIVE` machine, in this case `dev`, is the one your environment is pointing to.
|
||||
|
||||
### Access container ports
|
||||
|
||||
@@ -212,11 +317,11 @@ Work through this section to try some practical container tasks using `boot2dock
|
||||
|
||||
This didn't work. The reason it doesn't work is your `DOCKER_HOST` address is
|
||||
not the localhost address (0.0.0.0) but is instead the address of the
|
||||
`boot2docker` VM.
|
||||
your Docker VM.
|
||||
|
||||
5. Get the address of the `boot2docker` VM.
|
||||
5. Get the address of the `dev` VM.
|
||||
|
||||
$ boot2docker ip
|
||||
$ docker-machine ip dev
|
||||
192.168.59.103
|
||||
|
||||
6. Enter the `http://192.168.59.103:49157` address in your browser:
|
||||
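If you need to double-check which host port was published, `docker port` lists a container's mappings. A minimal sketch, where `web` is a placeholder for whatever container name you used earlier:

    $ docker port web        # 'web' is a placeholder container name
    80/tcp -> 0.0.0.0:49157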
@@ -232,7 +337,7 @@ Work through this section to try some practical container tasks using `boot2dock
|
||||
|
||||
### Mount a volume on the container
|
||||
|
||||
When you start `boot2docker`, it automatically shares your `/Users` directory
|
||||
When you start a container, it automatically shares your `/Users/username` directory
|
||||
with the VM. You can use this share point to mount directories onto your container.
|
||||
The next exercise demonstrates how to do this.
|
||||
|
||||
@@ -254,7 +359,8 @@ The next exercise demonstrates how to do this.
|
||||
|
||||
5. Start a new `nginx` container and replace the `html` folder with your `site` directory.
|
||||
|
||||
$ docker run -d -P -v $HOME/site:/usr/share/nginx/html --name mysite nginx
|
||||
$ docker run -d -P -v $HOME/site:/usr/share/nginx/html \
|
||||
--name mysite nginx
|
||||
|
||||
6. Get the `mysite` container's port.
|
||||
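One way to do this is with `docker port`; the host port shown here is only an example, yours will differ:

    $ docker port mysite 80
    0.0.0.0:49166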
|
||||
@@ -274,85 +380,53 @@ The next exercise demonstrates how to do this.
|
||||
|
||||

|
||||
|
||||
9. Stop and then remove your running `mysite` container.
|
||||
10. Stop and then remove your running `mysite` container.
|
||||
|
||||
$ docker stop mysite
|
||||
$ docker rm mysite
|
||||
|
||||
## Upgrade Boot2Docker
|
||||
|
||||
If you are running Boot2Docker 1.4.1 or greater, you can upgrade Boot2Docker from
|
||||
the command line. If you are running an older version, you should use the
|
||||
package provided by the `boot2docker` repository.
|
||||
## Upgrade Docker Toolbox
|
||||
|
||||
### From the command line
|
||||
|
||||
To upgrade from 1.4.1 or greater, you can do this:
|
||||
|
||||
1. Open a terminal on your local machine.
|
||||
|
||||
2. Stop the `boot2docker` application.
|
||||
|
||||
$ boot2docker stop
|
||||
|
||||
3. Run the upgrade command.
|
||||
|
||||
$ boot2docker upgrade
|
||||
To upgrade Docker Toolbox, download and re-run [the Docker Toolbox
|
||||
installer](https://docker.com/toolbox/).
|
||||
|
||||
|
||||
### Use the installer
|
||||
## Uninstall Docker Toolbox
|
||||
|
||||
To upgrade any version of Boot2Docker, do this:
|
||||
To uninstall, do the following:
|
||||
|
||||
1. Open a terminal on your local machine.
|
||||
1. List your machines.
|
||||
|
||||
2. Stop the `boot2docker` application.
|
||||
$ docker-machine ls
|
||||
NAME ACTIVE DRIVER STATE URL SWARM
|
||||
dev * virtualbox Running tcp://192.168.99.100:2376
|
||||
my-docker-machine virtualbox Stopped
|
||||
default virtualbox Stopped
|
||||
|
||||
$ boot2docker stop
|
||||
2. Remove each machine.
|
||||
|
||||
3. Go to the [boot2docker/osx-installer ](
|
||||
https://github.com/boot2docker/osx-installer/releases/latest) release page.
|
||||
$ docker-machine rm dev
|
||||
Successfully removed dev
|
||||
|
||||
4. Download Boot2Docker by clicking `Boot2Docker-x.x.x.pkg` in the "Downloads"
|
||||
section.
|
||||
Removing a machine deletes its VM from VirtualBox and from the
|
||||
`~/.docker/machine/machines` directory.
|
||||
|
||||
2. Install Boot2Docker by double-clicking the package.
|
||||
3. Remove the Docker Quickstart Terminal and Kitematic from your "Applications" folder.
|
||||
|
||||
The installer places Boot2Docker in your "Applications" folder.
|
||||
4. Remove the `docker`, `docker-compose`, and `docker-machine` commands from the `/usr/local/bin` folder.
|
||||
|
||||
$ rm /usr/local/bin/docker
|
||||
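Because all three Toolbox shims live in the same directory, you can also remove them in one command (paths assume the default install location):

    $ rm /usr/local/bin/docker /usr/local/bin/docker-compose /usr/local/bin/docker-machine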
|
||||
5. Delete the `~/.docker` folder from your system.
|
||||
|
||||
|
||||
## Uninstallation
|
||||
## Learning more
|
||||
|
||||
1. Go to the [boot2docker/osx-installer ](
|
||||
https://github.com/boot2docker/osx-installer/releases/latest) release page.
|
||||
Use `docker-machine help` to list the full command line reference for Docker Machine. For more
|
||||
information about using SSH or SCP to access a VM, see [the Docker Machine
|
||||
documentation](https://docs.docker.com/machine/).
|
||||
|
||||
2. Download the source code by clicking `Source code (zip)` or
|
||||
`Source code (tar.gz)` in the "Downloads" section.
|
||||
|
||||
3. Extract the source code.
|
||||
|
||||
4. Open a terminal on your local machine.
|
||||
|
||||
5. Change to the directory where you extracted the source code:
|
||||
|
||||
$ cd <path to extracted source code>
|
||||
|
||||
6. Make sure the uninstall.sh script is executable:
|
||||
|
||||
$ chmod +x uninstall.sh
|
||||
|
||||
7. Run the uninstall.sh script:
|
||||
|
||||
$ ./uninstall.sh
|
||||
|
||||
|
||||
## Learning more and acknowledgement
|
||||
|
||||
Use `boot2docker help` to list the full command line reference. For more
|
||||
information about using SSH or SCP to access the Boot2Docker VM, see the README
|
||||
at [Boot2Docker repository](https://github.com/boot2docker/boot2docker).
|
||||
|
||||
Thanks to Chris Jones whose [blog](http://viget.com/extend/how-to-use-docker-on-os-x-the-missing-guide)
|
||||
inspired me to redo this page.
|
||||
|
||||
Continue with the [Docker User Guide](/userguide).
|
||||
You can continue with the [Docker User Guide](/userguide). If you are
|
||||
interested in using the Kitematic GUI, see the [Kitematic user
|
||||
guide](/kitematic/userguide/).
|
||||
|
||||
@@ -111,18 +111,18 @@ install Docker using the following:
|
||||
|
||||
1. Log into your Ubuntu installation as a user with `sudo` privileges.
|
||||
|
||||
2. Verify that you have `wget` installed.
|
||||
2. Verify that you have `curl` installed.
|
||||
|
||||
$ which wget
|
||||
$ which curl
|
||||
|
||||
If `wget` isn't installed, install it after updating your manager:
|
||||
If `curl` isn't installed, install it after updating your package manager:
|
||||
|
||||
$ sudo apt-get update
|
||||
$ sudo apt-get install wget
|
||||
$ sudo apt-get install curl
|
||||
|
||||
3. Get the latest Docker package.
|
||||
|
||||
$ wget -qO- https://get.docker.com/ | sh
|
||||
$ curl -sSL https://get.docker.com/ | sh
|
||||
|
||||
The system prompts you for your `sudo` password. Then, it downloads and
|
||||
installs Docker and its dependencies.
|
||||
@@ -132,7 +132,7 @@ install Docker using the following:
|
||||
>command fails for the Docker repo during installation. To work around this,
|
||||
>add the key directly using the following:
|
||||
>
|
||||
> $ wget -qO- https://get.docker.com/gpg | sudo apt-key add -
|
||||
> $ curl -sSL https://get.docker.com/gpg | sudo apt-key add -
|
||||
|
||||
4. Verify `docker` is installed correctly.
|
||||
|
||||
@@ -197,9 +197,14 @@ When users run Docker, they may see these messages when working with an image:
|
||||
WARNING: Your kernel does not support cgroup swap limit. WARNING: Your
|
||||
kernel does not support swap limit capabilities. Limitation discarded.
|
||||
|
||||
To prevent these messages, enable memory and swap accounting on your system. To
|
||||
enable these on system using GNU GRUB (GNU GRand Unified Bootloader), do the
|
||||
following.
|
||||
To prevent these messages, enable memory and swap accounting on your
|
||||
system. Enabling memory and swap accounting does induce both a memory
|
||||
overhead and a performance degradation even when Docker is not in
|
||||
use. The memory overhead is about 1% of the total available
|
||||
memory. The performance degradation is roughly 10%.
|
||||
|
||||
To enable memory and swap accounting on systems using GNU GRUB (GNU GRand Unified
|
||||
Bootloader), do the following:
|
||||
|
||||
1. Log into Ubuntu as a user with `sudo` privileges.
|
||||
|
||||
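The remaining steps boil down to adding the cgroup options to the GRUB configuration and rebooting. A minimal sketch, assuming the standard Ubuntu GRUB 2 layout:

    $ sudo vi /etc/default/grub
    # set: GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
    $ sudo update-grub
    # reboot so the new kernel command line takes effect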
@@ -339,9 +344,9 @@ to start the docker daemon on boot
|
||||
|
||||
## Upgrade Docker
|
||||
|
||||
To install the latest version of Docker with `wget`:
|
||||
To install the latest version of Docker with `curl`:
|
||||
|
||||
$ wget -qO- https://get.docker.com/ | sh
|
||||
$ curl -sSL https://get.docker.com/ | sh
|
||||
|
||||
## Uninstallation
|
||||
|
||||
|
||||
@@ -9,165 +9,357 @@ parent = "smn_engine"
|
||||
<![end-metadata]-->
|
||||
|
||||
# Windows
|
||||
> **Note:**
|
||||
> Docker has been tested on Windows 7 and 8.1; it may also run on older versions.
|
||||
> Your processor needs to support hardware virtualization.
|
||||
|
||||
The Docker Engine uses Linux-specific kernel features, so to run it on Windows
|
||||
we need to use a lightweight virtual machine (VM). You use the **Windows Docker
|
||||
Client** to control the virtualized Docker Engine to build, run, and manage
|
||||
Docker containers.
|
||||
> **Note**: This release of Docker deprecates the Boot2Docker command line in
|
||||
> favor of Docker Machine. Use the Docker Toolbox to install Docker Machine as
|
||||
> well as the other Docker tools.
|
||||
|
||||
To make this process easier, we've designed a helper application called
|
||||
[Boot2Docker](https://github.com/boot2docker/boot2docker) which creates a Linux virtual
|
||||
machine on Windows to run Docker on a Linux operating system.
|
||||
You install Docker using Docker Toolbox. Docker Toolbox includes the following Docker tools:
|
||||
|
||||
Although you will be using Windows Docker client, the docker engine hosting the
|
||||
containers will still be running on Linux. Until the Docker engine for Windows
|
||||
is developed, you can launch only Linux containers from your Windows machine.
|
||||
* Docker Machine for running the `docker-machine` binary
|
||||
* Docker Engine for running the `docker` binary
|
||||
* Kitematic, the Docker GUI
|
||||
* a shell preconfigured for a Docker command-line environment
|
||||
* Oracle VM VirtualBox
|
||||
|
||||
Because the Docker daemon uses Linux-specific kernel features, you can't run
|
||||
Docker natively in Windows. Instead, you must use `docker-machine` to create and attach to a Docker VM on your machine. This VM hosts Docker for you on your Windows system.
|
||||
|
||||
The Docker VM is a lightweight Linux virtual machine made specifically to run the
|
||||
Docker daemon on Windows. The VirtualBox VM runs completely from RAM, is a
|
||||
small ~24MB download, and boots in approximately 5s.
|
||||
|
||||
## Requirements
|
||||
|
||||
Your machine must be running Windows 7.1, 8/8.1 or newer to run Docker. Windows 10 is not currently supported. To find out what version of Windows you have:
|
||||
|
||||
1. Right-click the Windows Start menu and choose **System**.
|
||||
|
||||

|
||||
|
||||
If you aren't using a supported version, you could consider upgrading your
|
||||
operating system.
|
||||
|
||||
2. Make sure your Windows system supports Hardware Virtualization Technology and that virtualization is enabled.
|
||||
|
||||
#### For Windows 8 or 8.1
|
||||
|
||||
Choose **Start > Task Manager** and navigate to the **Performance** tab.
|
||||
Under **CPU** you should see the following:
|
||||
|
||||

|
||||
|
||||
If virtualization is not enabled on your system, follow the manufacturer's instructions for enabling it.
|
||||
|
||||
#### For Windows 7
|
||||
|
||||
Run the <a
|
||||
href="http://www.microsoft.com/en-us/download/details.aspx?id=592"
|
||||
target="_blank"> Microsoft® Hardware-Assisted Virtualization Detection
|
||||
Tool</a> and follow the on-screen instructions.
|
||||
|
||||
|
||||
> **Note**: If you have Docker hosts running and you don't wish to do a Docker Toolbox
|
||||
installation, you can install the `docker.exe` using the *unofficial* Windows package
|
||||
manager Chocolatey. For information on how to do this, see [Docker package on
|
||||
Chocolatey](http://chocolatey.org/packages/docker).
|
||||
|
||||
### Learn the key concepts before installing
|
||||
|
||||
In a Docker installation on Linux, your machine is both the localhost and the
|
||||
Docker host. In networking, localhost means your computer. The Docker host is
|
||||
the machine on which the containers run.
|
||||
|
||||
On a typical Linux installation, the Docker client, the Docker daemon, and any
|
||||
containers run directly on your localhost. This means you can address ports on a
|
||||
Docker container using standard localhost addressing such as `localhost:8000` or
|
||||
`0.0.0.0:8376`.
|
||||
|
||||

|
||||
|
||||
In a Windows installation, the `docker` daemon is running inside a Linux virtual
|
||||
machine. You use the Windows Docker client to talk to the Docker host VM. Your
|
||||
Docker containers run inside this host.
|
||||
|
||||

|
||||
|
||||
## Demonstration
|
||||
In Windows, the Docker host address is the address of the Linux VM. When you
|
||||
start the VM with `docker-machine` it is assigned an IP address. When you start
|
||||
a container, the ports on a container map to ports on the VM. To see this in
|
||||
practice, work through the exercises on this page.
|
||||
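As a small illustration of that mapping, from the Docker Quickstart Terminal (the machine name and port numbers are only examples):

    $ docker-machine ip default
    192.168.99.100
    $ docker run -d -p 8000:80 nginx
    # the container's port 80 is now reachable at http://192.168.99.100:8000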
|
||||
<iframe width="640" height="480" src="//www.youtube.com/embed/TjMU3bDX4vo?rel=0" frameborder="0" allowfullscreen></iframe>
|
||||
|
||||
## Installation
|
||||
### Installation
|
||||
|
||||
1. Download the latest release of the
|
||||
[Docker for Windows Installer](https://github.com/boot2docker/windows-installer/releases/latest).
|
||||
2. Run the installer, which will install Docker Client for Windows, VirtualBox,
|
||||
Git for Windows (MSYS-git), the boot2docker Linux ISO, and the Boot2Docker
|
||||
management tool.
|
||||

|
||||
3. Run the **Boot2Docker Start** shortcut from your Desktop or “Program Files →
|
||||
Boot2Docker for Windows”.
|
||||
The Start script will ask you to enter an ssh key passphrase - the simplest
|
||||
(but least secure) is to just hit [Enter].
|
||||
If you have VirtualBox running, you must shut it down before running the
|
||||
installer.
|
||||
|
||||
4. The **Boot2Docker Start** will start a unix shell already configured to manage
|
||||
Docker running inside the virtual machine. Run `docker version` to see
|
||||
if it is working correctly:
|
||||
1. Go to the [Docker Toolbox](https://www.docker.com/toolbox) page.
|
||||
|
||||

|
||||
2. Click the installer link to download.
|
||||
|
||||
## Running Docker
|
||||
3. Install Docker Toolbox by double-clicking the installer.
|
||||
|
||||
> **Note:** if you are using a remote Docker daemon, such as Boot2Docker,
|
||||
> then _do not_ type the `sudo` before the `docker` commands shown in the
|
||||
> documentation's examples.
|
||||
The installer launches the "Setup - Docker Toolbox" dialog.
|
||||
|
||||
**Boot2Docker Start** will automatically start a shell with environment variables
|
||||
correctly set so you can start using Docker right away:
|
||||

|
||||
|
||||
Let's try the `hello-world` example image. Run
|
||||
4. Press "Next" to install the toolbox.
|
||||
|
||||
$ docker run hello-world
|
||||
The installer presents you with options to customize the standard
|
||||
installation. By default, the standard Docker Toolbox installation:
|
||||
|
||||
* installs executables for the Docker tools in `C:\Program Files\Docker Toolbox`
|
||||
* updates any existing VirtualBox installation
|
||||
* adds a Docker Inc. folder to your program shortcuts
|
||||
* updates your `PATH` environment variable
|
||||
* adds desktop icons for the Docker Quickstart Terminal and Kitematic
|
||||
|
||||
This installation assumes the defaults are acceptable.
|
||||
|
||||
5. Press "Next" until you reach the "Ready to Install" page.
|
||||
|
||||
The system prompts you for your password.
|
||||
|
||||

|
||||
|
||||
6. Press "Install" to continue with the installation.
|
||||
|
||||
When it completes, the installer provides you with some information you can
|
||||
use to complete some common tasks.
|
||||
|
||||

|
||||
|
||||
7. Press "Close" to exit.
|
||||
|
||||
## Running a Docker Container
|
||||
|
||||
To run a Docker container, you:
|
||||
|
||||
* create a new (or start an existing) Docker virtual machine
|
||||
* switch your environment to your new VM
|
||||
* use the `docker` client to create, load, and manage containers
|
||||
|
||||
Once you create a machine, you can reuse it as often as you like. Like any
|
||||
VirtualBox VM, it maintains its configuration between uses.
|
||||
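Put together for an existing machine, a minimal round trip from a Windows command prompt might look like this (the machine name `my-default` matches the examples later on this page):

    C:\Users\mary> docker-machine start my-default
    C:\Users\mary> docker-machine env --shell cmd my-default
    REM run the SET commands the env command prints, then:
    C:\Users\mary> docker run hello-world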
|
||||
There are two ways to use the installed tools: from the Docker Quickstart Terminal or
|
||||
[from your shell](#from-your-shell).
|
||||
|
||||
### From the Docker Quickstart Terminal
|
||||
|
||||
1. Find the Docker Quickstart Terminal icon on your Desktop and double-click to launch it.
|
||||
|
||||
The application:
|
||||
|
||||
* opens a terminal window
|
||||
* creates a `default` VM if it doesn't exist, or starts the VM if it does
|
||||
* points the terminal environment to this VM
|
||||
|
||||
Once the launch completes, you can run `docker` commands.
|
||||
|
||||
3. Verify your setup succeeded by running the `hello-world` container.
|
||||
|
||||
$ docker run hello-world
|
||||
Unable to find image 'hello-world:latest' locally
|
||||
511136ea3c5a: Pull complete
|
||||
31cbccb51277: Pull complete
|
||||
e45a5af57b00: Pull complete
|
||||
hello-world:latest: The image you are pulling has been verified.
|
||||
Important: image verification is a tech preview feature and should not be
|
||||
relied on to provide security.
|
||||
Status: Downloaded newer image for hello-world:latest
|
||||
Hello from Docker.
|
||||
This message shows that your installation appears to be working correctly.
|
||||
|
||||
To generate this message, Docker took the following steps:
|
||||
1. The Docker client contacted the Docker daemon.
|
||||
2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
|
||||
(Assuming it was not already locally available.)
|
||||
3. The Docker daemon created a new container from that image which runs the
|
||||
executable that produces the output you are currently reading.
|
||||
4. The Docker daemon streamed that output to the Docker client, which sent it
|
||||
to your terminal.
|
||||
|
||||
To try something more ambitious, you can run an Ubuntu container with:
|
||||
$ docker run -it ubuntu bash
|
||||
|
||||
For more examples and ideas, visit:
|
||||
http://docs.docker.com/userguide/
|
||||
|
||||
This should download the very small `hello-world` image and print a
|
||||
`Hello from Docker.` message.
|
||||
|
||||
## Using Docker from Windows Command Line Prompt (cmd.exe)
|
||||
|
||||
Launch a Windows Command Line Prompt (cmd.exe).
|
||||
1. Launch a Windows Command Line Prompt (cmd.exe).
|
||||
|
||||
Boot2Docker command requires `ssh.exe` to be in the PATH, therefore we need to
|
||||
include `bin` folder of the Git installation (which has ssh.exe) to the `%PATH%`
|
||||
environment variable by running:
|
||||
The `docker-machine` command requires `ssh.exe` in your `PATH` environment
|
||||
variable. This `.exe` is in the MsysGit `bin` folder.
|
||||
|
||||
set PATH=%PATH%;"c:\Program Files (x86)\Git\bin"
|
||||
2. Add the Git `bin` folder to the `%PATH%` environment variable by running:
|
||||
|
||||
and then we can run the `boot2docker start` command to start the Boot2Docker VM.
|
||||
(Run `boot2docker init` command if you get an error saying machine does not
|
||||
exist.) Then copy the instructions for cmd.exe to set the environment variables
|
||||
to your console window and you are ready to run docker commands such as
|
||||
`docker ps`:
|
||||
set PATH=%PATH%;"c:\Program Files (x86)\Git\bin"
|
||||
|
||||

|
||||
3. Create a new Docker VM.
|
||||
|
||||
docker-machine create --driver virtualbox my-default
|
||||
Creating VirtualBox VM...
|
||||
Creating SSH key...
|
||||
Starting VirtualBox VM...
|
||||
Starting VM...
|
||||
To see how to connect Docker to this machine, run: docker-machine env my-default
|
||||
|
||||
The command also creates a machine configuration in the
|
||||
`C:\USERS\USERNAME\.docker\machine\machines` directory. You only need to run the `create`
|
||||
command once. Then, you can use `docker-machine` to start, stop, query, and
|
||||
otherwise manage the VM from the command line.
|
||||
|
||||
4. List your available machines.
|
||||
|
||||
C:\Users\mary> docker-machine ls
|
||||
NAME ACTIVE DRIVER STATE URL SWARM
|
||||
my-default * virtualbox Running tcp://192.168.99.101:2376
|
||||
|
||||
If you have previously installed the deprecated Boot2Docker application or
|
||||
run the Docker Quickstart Terminal, you may have a `dev` VM as well.
|
||||
|
||||
5. Get the environment commands for your new VM.
|
||||
|
||||
C:\Users\mary> docker-machine env --shell cmd my-default
|
||||
|
||||
6. Connect your shell to the `my-default` machine.
|
||||
|
||||
C:\Users\mary> @FOR /f "tokens=*" %i IN ('docker-machine env --shell cmd my-default') DO @%i
|
||||
|
||||
7. Run the `hello-world` container to verify your setup.
|
||||
|
||||
C:\Users\mary> docker run hello-world
|
||||
|
||||
## Using Docker from PowerShell
|
||||
|
||||
Launch a PowerShell window, then add `ssh.exe` to your PATH:
|
||||
1. Launch a Windows PowerShell window.
|
||||
|
||||
$Env:Path = "${Env:Path};c:\Program Files (x86)\Git\bin"
|
||||
2. Add `ssh.exe` to your PATH:
|
||||
|
||||
and after running the `boot2docker start` command it will print PowerShell
|
||||
commands to set the environment variables to connect to the Docker daemon
|
||||
running inside the VM. Run these commands and you are ready to run docker
|
||||
commands such as `docker ps`:
|
||||
PS C:\Users\mary> $Env:Path = "${Env:Path};c:\Program Files (x86)\Git\bin"
|
||||
|
||||

|
||||
3. Create a new Docker VM.
|
||||
|
||||
> NOTE: You can alternatively run `boot2docker shellinit | Invoke-Expression`
|
||||
> command to set the environment variables instead of copying and pasting on
|
||||
> PowerShell.
|
||||
PS C:\Users\mary> docker-machine create --driver virtualbox my-default
|
||||
|
||||
# Further Details
|
||||
4. List your available machines.
|
||||
|
||||
The Boot2Docker management tool provides several commands:
|
||||
C:\Users\mary> docker-machine ls
|
||||
NAME ACTIVE DRIVER STATE URL SWARM
|
||||
my-default * virtualbox Running tcp://192.168.99.101:2376
|
||||
|
||||
$ boot2docker
|
||||
Usage: boot2docker.exe [<options>] {help|init|up|ssh|save|down|poweroff|reset|restart|config|status|info|ip|shellinit|delete|download|upgrade|version} [<args>]
|
||||
5. Get the environment commands for your new VM.
|
||||
|
||||
## Upgrading
|
||||
C:\Users\mary> docker-machine env --shell powershell my-default
|
||||
|
||||
1. Download the latest release of the [Docker for Windows Installer](
|
||||
https://github.com/boot2docker/windows-installer/releases/latest)
|
||||
6. Connect your shell to the `my-default` machine.
|
||||
|
||||
2. Run the installer, which will update the Boot2Docker management tool.
|
||||
C:\Users\mary> docker-machine env --shell powershell my-default | Invoke-Expression
|
||||
|
||||
3. To upgrade your existing virtual machine, open a terminal and run:
|
||||
7. Run the `hello-world` container to verify your setup.
|
||||
|
||||
boot2docker stop
|
||||
boot2docker download
|
||||
boot2docker start
|
||||
C:\Users\mary> docker run hello-world
|
||||
|
||||
|
||||
## Learn about your Toolbox installation
|
||||
|
||||
Toolbox installs the Docker Engine binary in the `C:\Program Files\Docker
|
||||
Toolbox` directory. When you use the Docker Quickstart Terminal or create a
|
||||
`default` VM manually, Docker Machine adds the
`C:\USERS\USERNAME\.docker\machine\machines\default` folder to your
system. This folder contains the configuration for the VM.
|
||||
|
||||
You can create multiple VMs on your system with Docker Machine. So, you may have
|
||||
more than one VM folder if you have more than one VM. To remove a VM, use the
|
||||
`docker-machine rm <machine-name>` command.
|
||||
|
||||
## Migrate from Boot2Docker
|
||||
|
||||
If you were using Boot2Docker previously, you have a pre-existing Docker
|
||||
`boot2docker-vm` VM on your local system. To allow Docker Machine to manage
|
||||
this older VM, you can migrate it.
|
||||
|
||||
1. Open a terminal or the Docker CLI on your system.
|
||||
|
||||
2. Type the following command.
|
||||
|
||||
$ docker-machine create -d virtualbox --virtualbox-import-boot2docker-vm boot2docker-vm docker-vm
|
||||
|
||||
3. Use the `docker-machine` command to interact with the migrated VM.
|
||||
|
||||
The `docker-machine` subcommands are slightly different than the `boot2docker`
|
||||
subcommands. The table below lists the equivalent `docker-machine` subcommand
|
||||
and what it does:
|
||||
|
||||
| `boot2docker` | `docker-machine` | `docker-machine` description |
|
||||
|----------------|------------------|----------------------------------------------------------|
|
||||
| init | create | Creates a new docker host. |
|
||||
| up | start | Starts a stopped machine. |
|
||||
| ssh | ssh | Runs a command or interactive ssh session on the machine.|
|
||||
| save | - | Not applicable. |
|
||||
| down | stop | Stops a running machine. |
|
||||
| poweroff | stop | Stops a running machine. |
|
||||
| reset | restart | Restarts a running machine. |
|
||||
| config | inspect | Prints machine configuration details. |
|
||||
| status | ls | Lists all machines and their status. |
|
||||
| info | inspect | Displays a machine's details. |
|
||||
| ip | ip | Displays the machine's ip address. |
|
||||
| shellinit | env | Displays shell commands needed to configure your shell to interact with a machine |
|
||||
| delete | rm | Removes a machine. |
|
||||
| download | - | Not applicable. |
|
||||
| upgrade | upgrade | Upgrades a machine's Docker client to the latest stable release. |
|
||||
|
||||
|
||||
## Upgrade Docker Toolbox
|
||||
|
||||
To upgrade Docker Toolbox, download and re-run [the Docker Toolbox
|
||||
installer](https://www.docker.com/toolbox).
|
||||
|
||||
## Container port redirection
|
||||
|
||||
If you are curious, the username for the boot2docker default user is `docker`
|
||||
and the password is `tcuser`.
|
||||
If you are curious, the username for the Docker default user is `docker` and the
|
||||
password is `tcuser`. The latest version of `docker-machine` sets up a host only
|
||||
network adaptor which provides access to the container's ports.
|
||||
|
||||
The latest version of `boot2docker` sets up a host only network adaptor which
|
||||
provides access to the container's ports.
|
||||
If you run a container with a published port:
|
||||
|
||||
If you run a container with an exposed port:
|
||||
$ docker run --rm -i -t -p 80:80 nginx
|
||||
|
||||
docker run --rm -i -t -p 80:80 nginx
|
||||
Then you should be able to access that nginx server using the IP address
|
||||
reported to you using:
|
||||
|
||||
Then you should be able to access that nginx server using the IP address reported
|
||||
to you using:
|
||||
$ docker-machine ip
|
||||
|
||||
boot2docker ip
|
||||
|
||||
Typically, it is 192.168.59.103, but it could get changed by VirtualBox's DHCP
|
||||
implementation.
|
||||
|
||||
For further information or to report issues, please see the [Boot2Docker site](http://boot2docker.io)
|
||||
Typically, the IP is 192.168.59.103, but it could get changed by VirtualBox's
|
||||
DHCP implementation.
|
||||
|
||||
## Login with PUTTY instead of using the CMD
|
||||
|
||||
Boot2Docker generates and uses the public/private key pair in your `%USERPROFILE%\.ssh`
|
||||
directory so to log in you need to use the private key from this same directory.
|
||||
|
||||
The private key needs to be converted into the format PuTTY uses.
|
||||
|
||||
You can do this with
|
||||
Docker Machine generates and uses the public/private key pair in your
|
||||
`%USERPROFILE%\.ssh` directory, so to log in you need to use the private key from
|
||||
this same directory. The private key needs to be converted into the format PuTTY
|
||||
uses. You can do this with
|
||||
[puttygen](http://www.chiark.greenend.org.uk/~sgtatham/putty/download.html):
|
||||
|
||||
- Open `puttygen.exe` and load ("File"->"Load" menu) the private key from
|
||||
1. Open `puttygen.exe` and load ("File"->"Load" menu) the private key from
|
||||
`%USERPROFILE%\.ssh\id_boot2docker`
|
||||
- then click: "Save Private Key".
|
||||
- Then use the saved file to login with PuTTY using `docker@127.0.0.1:2022`.
|
||||
|
||||
2. Click "Save Private Key".
|
||||
|
||||
3. Use the saved file to login with PuTTY using `docker@127.0.0.1:2022`.
|
||||
|
||||
## Uninstallation
|
||||
|
||||
You can uninstall Boot2Docker using Window's standard process for removing programs.
|
||||
This process does not remove the `docker-install.exe` file. You must delete that file
|
||||
yourself.
|
||||
You can uninstall Docker Toolbox using the Windows standard process for removing
|
||||
programs. This process does not remove the `docker-install.exe` file. You must
|
||||
delete that file yourself.
|
||||
|
||||
## References
|
||||
## Learn more
|
||||
|
||||
If you have Docker hosts running and if you don't wish to do a
|
||||
Boot2Docker installation, you can install the docker.exe using
|
||||
unofficial Windows package manager Chocolately. For information
|
||||
on how to do this, see [Docker package on Chocolatey](http://chocolatey.org/packages/docker).
|
||||
You can continue with the [Docker User Guide](/userguide). If you are
|
||||
interested in using the Kitematic GUI, see the [Kitematic user
|
||||
guide](/kitematic/userguide/).
|
||||
|
||||
@@ -116,11 +116,11 @@ images, or you can download Docker images that other people have already created
|
||||
Docker images are the **build** component of Docker.
|
||||
|
||||
#### Docker registries
|
||||
Docker registries hold images. These are public or private stores from which you upload
|
||||
or download images. The public Docker registry is called
|
||||
[Docker Hub](http://hub.docker.com). It provides a huge collection of existing
|
||||
images for your use. These can be images you create yourself or you
|
||||
can use images that others have previously created. Docker registries are the
|
||||
Docker registries hold images. These are public or private stores from which you
|
||||
upload or download images. The public Docker registry is provided with the
|
||||
[Docker Hub](http://hub.docker.com). It serves a huge collection of existing
|
||||
images for your use. These can be images you create yourself or you can use
|
||||
images that others have previously created. Docker registries are the
|
||||
**distribution** component of Docker.
|
||||
|
||||
#### Docker containers
|
||||
@@ -179,8 +179,9 @@ returns a final image.
|
||||
|
||||
### How does a Docker registry work?
|
||||
The Docker registry is the store for your Docker images. Once you build a Docker
|
||||
image you can *push* it to a public registry [Docker Hub](https://hub.docker.com) or to
|
||||
your own registry running behind your firewall.
|
||||
image you can *push* it to a public registry such as the one provided by [Docker
|
||||
Hub](https://hub.docker.com) or to your own registry running behind your
|
||||
firewall.
|
||||
|
||||
Using the Docker client, you can search for already published images and then
|
||||
pull them down to your Docker host to build containers from them.
|
||||
|
||||
@@ -12,6 +12,14 @@ parent = "mn_use_docker"
|
||||
|
||||
The following features are deprecated.
|
||||
|
||||
### LXC built-in exec driver
|
||||
**Deprecated In Release: v1.8**
|
||||
|
||||
**Target For Removal In Release: v1.10**
|
||||
|
||||
The built-in LXC execution driver is deprecated in favor of an external implementation.
|
||||
The lxc-conf flag and API fields will also be removed.
|
||||
|
||||
### Old Command Line Options
|
||||
**Deprecated In Release: [v1.8.0](/release-notes/#docker-engine-1-8-0)**
|
||||
|
||||
|
||||
@@ -33,7 +33,7 @@ Docker currently runs only on Linux, but you can use VirtualBox to run Docker in
|
||||
a virtual machine on your box, and get the best of both worlds. Check out the
|
||||
[*Mac OS X*](../installation/mac/#macosx) and [*Microsoft
|
||||
Windows*](../installation/windows/#windows) installation guides. The small Linux
|
||||
distribution boot2docker can be run inside virtual machines on these two
|
||||
distribution used by Docker Machine can be run inside virtual machines on these two
|
||||
operating systems.
|
||||
|
||||
> **Note:** if you are using a remote Docker daemon, such as Boot2Docker,
|
||||
@@ -97,7 +97,7 @@ with several powerful functionalities:
|
||||
applications. Your ideal Postgresql setup can be re-used for all your future
|
||||
projects. And so on.
|
||||
|
||||
- *Sharing.* Docker has access to a [public registry](https://hub.docker.com)
|
||||
- *Sharing.* Docker has access to a public registry [on Docker Hub](https://registry.hub.docker.com/)
|
||||
where thousands of people have uploaded useful containers: anything from Redis,
|
||||
CouchDB, Postgres to IRC bouncers to Rails app servers to Hadoop to base images
|
||||
for various Linux distros. The
|
||||
|
||||
@@ -26,6 +26,9 @@ program code and documentation code.
|
||||
* Run `gofmt -s -w file.go` on each changed file before
|
||||
committing your changes. Most editors have plug-ins that do this automatically.
|
||||
|
||||
* Run `golint` on each changed file before
|
||||
committing your changes.
|
||||
|
||||
* Update the documentation when creating or modifying features.
|
||||
|
||||
* Commits that fix or close an issue should reference them in the commit message
|
||||
|
||||
@@ -29,7 +29,7 @@ you continue working with your fork on this branch.
|
||||
|
||||
## Clean your host of Docker artifacts
|
||||
|
||||
Docker developers run the latest stable release of the Docker software (with Boot2Docker if their machine is Mac OS X). They clean their local
|
||||
Docker developers run the latest stable release of the Docker software (with Docker Machine if their machine is Mac OS X). They clean their local
|
||||
hosts of unnecessary Docker artifacts such as stopped containers or unused
|
||||
images. Cleaning unnecessary artifacts isn't strictly necessary, but it is
|
||||
good practice, so it is included here.
|
||||
|
||||
@@ -57,8 +57,8 @@ target="_blank">docker/docker repository</a>.
|
||||
|
||||
$ cd ~
|
||||
|
||||
In Windows, you'll work in your Boot2Docker window instead of Powershell or
|
||||
a `cmd` window.
|
||||
In Windows, you'll work in your Docker Quickstart Terminal window instead of
|
||||
Powershell or a `cmd` window.
|
||||
|
||||
6. Create a `repos` directory.
|
||||
|
||||
|
||||
@@ -317,9 +317,9 @@ can browse the docs.
|
||||
|
||||
4. Enter the URL in your browser.
|
||||
|
||||
If you are running Boot2Docker, replace the default localhost address
|
||||
If you are using Docker Machine, replace the default localhost address
|
||||
(0.0.0.0) with your DOCKERHOST value. You can get this value at any time by
|
||||
entering `boot2docker ip` at the command line.
|
||||
entering `docker-machine ip <machine-name>` at the command line.
|
||||
|
||||
5. Once in the documentation, look for the red notice to verify you are seeing the correct build.
|
||||
|
||||
|
||||
@@ -10,502 +10,5 @@ parent = "smn_remoteapi"
|
||||
|
||||
# Docker Hub API
|
||||
|
||||
- This is the REST API for [Docker Hub](https://hub.docker.com).
|
||||
- Authorization is done with basic auth over SSL
|
||||
- Not all commands require authentication, only those noted as such.
|
||||
This API is deprecated as of 1.7. To view the old version, see the [Docker Hub API](https://docs.docker.com/v1.7/reference/api/docker-io_api/) in the 1.7 documentation.
|
||||
|
||||
# Repositories
|
||||
|
||||
## User repository
|
||||
|
||||
### Create a user repository
|
||||
|
||||
`PUT /v1/repositories/(namespace)/(repo_name)/`
|
||||
|
||||
Create a user repository with the given `namespace` and `repo_name`.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
PUT /v1/repositories/foo/bar/ HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
X-Docker-Token: true
|
||||
|
||||
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"}]
|
||||
|
||||
Parameters:
|
||||
|
||||
- **namespace** – the namespace for the repo
|
||||
- **repo_name** – the name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
WWW-Authenticate: Token signature=123abc,repository="foo/bar",access=write
|
||||
X-Docker-Token: signature=123abc,repository="foo/bar",access=write
|
||||
X-Docker-Endpoints: registry-1.docker.io [, registry-2.docker.io]
|
||||
|
||||
""
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **200** – Created
|
||||
- **400** – Errors (invalid json, missing or invalid fields, etc)
|
||||
- **401** – Unauthorized
|
||||
- **403** – Account is not Active
|
||||
|
||||
### Delete a user repository
|
||||
|
||||
`DELETE /v1/repositories/(namespace)/(repo_name)/`
|
||||
|
||||
Delete a user repository with the given `namespace` and `repo_name`.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
DELETE /v1/repositories/foo/bar/ HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
X-Docker-Token: true
|
||||
|
||||
""
|
||||
|
||||
Parameters:
|
||||
|
||||
- **namespace** – the namespace for the repo
|
||||
- **repo_name** – the name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 202
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
WWW-Authenticate: Token signature=123abc,repository="foo/bar",access=delete
|
||||
X-Docker-Token: signature=123abc,repository="foo/bar",access=delete
|
||||
X-Docker-Endpoints: registry-1.docker.io [, registry-2.docker.io]
|
||||
|
||||
""
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **200** – Deleted
|
||||
- **202** – Accepted
|
||||
- **400** – Errors (invalid json, missing or invalid fields, etc)
|
||||
- **401** – Unauthorized
|
||||
- **403** – Account is not Active
|
||||
|
||||
## Library repository
|
||||
|
||||
### Create a library repository
|
||||
|
||||
`PUT /v1/repositories/(repo_name)/`
|
||||
|
||||
Create a library repository with the given `repo_name`.
|
||||
This is a restricted feature only available to docker admins.
|
||||
|
||||
> When namespace is missing, it is assumed to be `library`
|
||||
|
||||
|
||||
**Example Request**:
|
||||
|
||||
PUT /v1/repositories/foobar/ HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
X-Docker-Token: true
|
||||
|
||||
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"}]
|
||||
|
||||
Parameters:
|
||||
|
||||
- **repo_name** – the library name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
WWW-Authenticate: Token signature=123abc,repository="library/foobar",access=write
|
||||
X-Docker-Token: signature=123abc,repository="foo/bar",access=write
|
||||
X-Docker-Endpoints: registry-1.docker.io [, registry-2.docker.io]
|
||||
|
||||
""
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **200** – Created
|
||||
- **400** – Errors (invalid json, missing or invalid fields, etc)
|
||||
- **401** – Unauthorized
|
||||
- **403** – Account is not Active
|
||||
|
||||
### Delete a library repository
|
||||
|
||||
`DELETE /v1/repositories/(repo_name)/`
|
||||
|
||||
Delete a library repository with the given `repo_name`.
|
||||
This is a restricted feature only available to docker admins.
|
||||
|
||||
> When namespace is missing, it is assumed to be `library`
|
||||
|
||||
|
||||
**Example Request**:
|
||||
|
||||
DELETE /v1/repositories/foobar/ HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
X-Docker-Token: true
|
||||
|
||||
""
|
||||
|
||||
Parameters:
|
||||
|
||||
- **repo_name** – the library name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 202
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
WWW-Authenticate: Token signature=123abc,repository="library/foobar",access=delete
|
||||
X-Docker-Token: signature=123abc,repository="foo/bar",access=delete
|
||||
X-Docker-Endpoints: registry-1.docker.io [, registry-2.docker.io]
|
||||
|
||||
""
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **200** – Deleted
|
||||
- **202** – Accepted
|
||||
- **400** – Errors (invalid json, missing or invalid fields, etc)
|
||||
- **401** – Unauthorized
|
||||
- **403** – Account is not Active
|
||||
|
||||
# Repository images
|
||||
|
||||
## User repository images
|
||||
|
||||
### Update user repository images
|
||||
|
||||
`PUT /v1/repositories/(namespace)/(repo_name)/images`
|
||||
|
||||
Update the images for a user repo.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
PUT /v1/repositories/foo/bar/images HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
|
||||
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
|
||||
"checksum": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"}]
|
||||
|
||||
Parameters:
|
||||
|
||||
- **namespace** – the namespace for the repo
|
||||
- **repo_name** – the name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 204
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
""
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **204** – Created
|
||||
- **400** – Errors (invalid json, missing or invalid fields, etc)
|
||||
- **401** – Unauthorized
|
||||
- **403** – Account is not Active or permission denied
|
||||
|
||||
### List user repository images
|
||||
|
||||
`GET /v1/repositories/(namespace)/(repo_name)/images`
|
||||
|
||||
Get the images for a user repo.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
GET /v1/repositories/foo/bar/images HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
|
||||
Parameters:
|
||||
|
||||
- **namespace** – the namespace for the repo
|
||||
- **repo_name** – the name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
|
||||
"checksum": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"},
|
||||
{"id": "ertwetewtwe38722009fe6857087b486531f9a779a0c1dfddgfgsdgdsgds",
|
||||
"checksum": "34t23f23fc17e3ed29dae8f12c4f9e89cc6f0bsdfgfsdgdsgdsgerwgew"}]
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **200** – OK
|
||||
- **404** – Not found
|
||||
|
||||
## Library repository images
|
||||
|
||||
### Update library repository images
|
||||
|
||||
`PUT /v1/repositories/(repo_name)/images`
|
||||
|
||||
Update the images for a library repo.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
PUT /v1/repositories/foobar/images HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
|
||||
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
|
||||
"checksum": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"}]
|
||||
|
||||
Parameters:
|
||||
|
||||
- **repo_name** – the library name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 204
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
""
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **204** – Created
|
||||
- **400** – Errors (invalid json, missing or invalid fields, etc)
|
||||
- **401** – Unauthorized
|
||||
- **403** – Account is not Active or permission denied
|
||||
|
||||
### List library repository images
|
||||
|
||||
`GET /v1/repositories/(repo_name)/images`
|
||||
|
||||
Get the images for a library repo.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
GET /v1/repositories/foobar/images HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
|
||||
Parameters:
|
||||
|
||||
- **repo_name** – the library name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
|
||||
"checksum": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"},
|
||||
{"id": "ertwetewtwe38722009fe6857087b486531f9a779a0c1dfddgfgsdgdsgds",
|
||||
"checksum": "34t23f23fc17e3ed29dae8f12c4f9e89cc6f0bsdfgfsdgdsgdsgerwgew"}]
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **200** – OK
|
||||
- **404** – Not found
|
||||
|
||||
# Repository authorization
|
||||
|
||||
## Library repository
|
||||
|
||||
### Authorize a token for a library
|
||||
|
||||
`PUT /v1/repositories/(repo_name)/auth`
|
||||
|
||||
Authorize a token for a library repo
|
||||
|
||||
**Example Request**:
|
||||
|
||||
PUT /v1/repositories/foobar/auth HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Authorization: Token signature=123abc,repository="library/foobar",access=write
|
||||
|
||||
Parameters:
|
||||
|
||||
- **repo_name** – the library name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
"OK"
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **200** – OK
|
||||
- **403** – Permission denied
|
||||
- **404** – Not found
|
||||
|
||||
## User repository
|
||||
|
||||
### Authorize a token for a user repository
|
||||
|
||||
`PUT /v1/repositories/(namespace)/(repo_name)/auth`
|
||||
|
||||
Authorize a token for a user repo
|
||||
|
||||
**Example Request**:
|
||||
|
||||
PUT /v1/repositories/foo/bar/auth HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Authorization: Token signature=123abc,repository="foo/bar",access=write
|
||||
|
||||
Parameters:
|
||||
|
||||
- **namespace** – the namespace for the repo
|
||||
- **repo_name** – the name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
"OK"
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **200** – OK
|
||||
- **403** – Permission denied
|
||||
- **404** – Not found
|
||||
|
||||
## Users
|
||||
|
||||
### User login
|
||||
|
||||
`GET /v1/users/`
|
||||
|
||||
If you want to check your login, you can try this endpoint
|
||||
|
||||
**Example Request**:
|
||||
|
||||
GET /v1/users/ HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
OK
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **200** – no error
|
||||
- **401** – Unauthorized
|
||||
- **403** – Account is not Active
|
||||
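For a quick check from a shell, the same request can be issued with `curl`; the credentials are placeholders:

    $ curl --user myuser:mypassword https://index.docker.io/v1/users/
    OK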
|
||||
### User register
|
||||
|
||||
`POST /v1/users/`
|
||||
|
||||
Register a new account.
|
||||
|
||||
**Example request**:
|
||||
|
||||
POST /v1/users/ HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
|
||||
{"email": "sam@docker.com",
|
||||
"password": "toto42",
|
||||
"username": "foobar"}
|
||||
|
||||
Json Parameters:
|
||||
|
||||
- **email** – valid email address, that needs to be confirmed
|
||||
- **username** – min 4 characters, max 30 characters, must match
|
||||
the regular expression [a-z0-9_].
|
||||
- **password** – min 5 characters
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 201 OK
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
"User Created"
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **201** – User Created
|
||||
- **400** – Errors (invalid json, missing or invalid fields, etc)
|
||||
|
||||
### Update user
|
||||
|
||||
`PUT /v1/users/(username)/`
|
||||
|
||||
Change a password or email address for a given user. If you pass in an
email, it will be added to your account; it will not remove the old
one. Passwords will be updated.
|
||||
|
||||
It is up to the client to verify that the password that is sent is
the one that the user wants. A common approach is to have them type it
twice.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
PUT /v1/users/fakeuser/ HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
|
||||
{"email": "sam@docker.com",
|
||||
"password": "toto42"}
|
||||
|
||||
Parameters:
|
||||
|
||||
- **username** – username for the person you want to update
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 204
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
""
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **204** – User Updated
|
||||
- **400** – Errors (invalid json, missing or invalid fields, etc)
|
||||
- **401** – Unauthorized
|
||||
- **403** – Account is not Active
|
||||
- **404** – User not found
|
||||
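The same update can be sketched with `curl`; the username, password, and email below are the placeholders from the example above:

    $ curl --user fakeuser:currentpassword -X PUT \
        -H "Content-Type: application/json" \
        -d '{"email": "sam@docker.com", "password": "toto42"}' \
        https://index.docker.io/v1/users/fakeuser/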
|
||||
@@ -108,7 +108,7 @@ of a 404.
|
||||
You can now supply a `stream` bool to get only one set of stats and
|
||||
disconnect
|
||||
|
||||
`GET /containers(id)/logs`
|
||||
`GET /containers/(id)/logs`
|
||||
|
||||
**New!**
|
||||
|
||||
@@ -138,6 +138,7 @@ In addition, the end point now returns the new boolean fields
|
||||
This endpoint now returns `Os`, `Arch` and `KernelVersion`.
|
||||
|
||||
`POST /containers/create`
|
||||
|
||||
`POST /containers/(id)/start`
|
||||
|
||||
**New!**
|
||||
@@ -297,429 +298,4 @@ The `fromImage` and `repo` parameters now supports the `repo:tag` format.
|
||||
Consequently, the `tag` parameter is now obsolete. Using the new format and
|
||||
the `tag` parameter at the same time will return an error.
|
||||
|
||||
## v1.13
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.13*](/reference/api/docker_remote_api_v1.13/)
|
||||
|
||||
### What's new
|
||||
|
||||
`GET /containers/(name)/json`
|
||||
|
||||
**New!**
|
||||
The `HostConfig.Links` field is now filled correctly
|
||||
|
||||
**New!**
|
||||
`Sockets` parameter added to the `/info` endpoint listing all the sockets the
|
||||
daemon is configured to listen on.
|
||||
|
||||
`POST /containers/(name)/start`
|
||||
`POST /containers/(name)/stop`
|
||||
|
||||
**New!**
|
||||
`start` and `stop` will now return 304 if the container's status is not modified
|
||||
|
||||
`POST /commit`
|
||||
|
||||
**New!**
|
||||
Added a `pause` parameter (default `true`) to pause the container during commit
|
||||
|
||||
## v1.12
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.12*](/reference/api/docker_remote_api_v1.12/)
|
||||
|
||||
### What's new
|
||||
|
||||
`POST /build`
|
||||
|
||||
**New!**
|
||||
Build now has support for the `forcerm` parameter to always remove containers
|
||||
|
||||
`GET /containers/(name)/json`
|
||||
`GET /images/(name)/json`
|
||||
|
||||
**New!**
|
||||
All the JSON keys are now in CamelCase
|
||||
|
||||
**New!**
|
||||
Trusted builds are now Automated Builds - `is_trusted` is now `is_automated`.
|
||||
|
||||
**Removed Insert Endpoint**
|
||||
The `insert` endpoint has been removed.
|
||||
|
||||
## v1.11
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.11*](/reference/api/docker_remote_api_v1.11/)
|
||||
|
||||
### What's new
|
||||
|
||||
`GET /_ping`
|
||||
|
||||
**New!**
|
||||
You can now ping the server via the `_ping` endpoint.
|
||||
|
||||
`GET /events`
|
||||
|
||||
**New!**
|
||||
You can now use the `-until` parameter to close the connection
after a given timestamp.
|
||||
|
||||
`GET /containers/(id)/logs`
|
||||
|
||||
This URL is now the preferred method for getting container logs.
|
||||
|
||||
## v1.10
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.10*](/reference/api/docker_remote_api_v1.10/)
|
||||
|
||||
### What's new
|
||||
|
||||
`DELETE /images/(name)`
|
||||
|
||||
**New!**
|
||||
You can now use the force parameter to force delete an
image, even if it's tagged in multiple repositories.

**New!**
You can now use the noprune parameter to prevent the deletion of parent
images
|
||||
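As an illustration, the force delete can be issued with `curl`; the daemon address is an assumption (most installs listen on a Unix socket rather than TCP), and the image name is only an example:

    $ curl -X DELETE "http://localhost:2375/v1.10/images/ubuntu:12.04?force=1"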
|
||||
`DELETE /containers/(id)`
|
||||
|
||||
**New!**
|
||||
You can now use the force parameter to force delete a
|
||||
container, even if it is currently running
|
||||
|
||||
## v1.9
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.9*](/reference/api/docker_remote_api_v1.9/)
|
||||
|
||||
### What's new
|
||||
|
||||
`POST /build`
|
||||
|
||||
**New!**
|
||||
This endpoint now takes a serialized ConfigFile which it
|
||||
uses to resolve the proper registry auth credentials for pulling the
|
||||
base image. Clients which previously implemented the version
|
||||
accepting an AuthConfig object must be updated.
|
||||
|
||||
## v1.8
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.8*](/reference/api/docker_remote_api_v1.8/)
|
||||
|
||||
### What's new
|
||||
|
||||
`POST /build`
|
||||
|
||||
**New!**
|
||||
This endpoint now returns the build status as a json stream. In
|
||||
case of a build error, it returns the exit status of the failed
|
||||
command.
|
||||
|
||||
`GET /containers/(id)/json`
|
||||
|
||||
**New!**
|
||||
This endpoint now returns the host config for the
|
||||
container.
|
||||
|
||||
`POST /images/create`
|
||||
|
||||
`POST /images/(name)/insert`
|
||||
|
||||
`POST /images/(name)/push`
|
||||
|
||||
**New!**
|
||||
A progressDetail object was added in the JSON. It's now
|
||||
possible to get the current value and the total of the progress
|
||||
without having to parse the string.
|
||||
|
||||
## v1.7
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.7*](/reference/api/docker_remote_api_v1.7/)
|
||||
|
||||
### What's new
|
||||
|
||||
`GET /images/json`
|
||||
|
||||
The format of the json returned from this uri changed. Instead of an
|
||||
entry for each repo/tag on an image, each image is only represented
|
||||
once, with a nested attribute indicating the repo/tags that apply to
|
||||
that image.
|
||||
|
||||
Instead of:
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
[
|
||||
{
|
||||
"VirtualSize": 131506275,
|
||||
"Size": 131506275,
|
||||
"Created": 1365714795,
|
||||
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
|
||||
"Tag": "12.04",
|
||||
"Repository": "ubuntu"
|
||||
},
|
||||
{
|
||||
"VirtualSize": 131506275,
|
||||
"Size": 131506275,
|
||||
"Created": 1365714795,
|
||||
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
|
||||
"Tag": "latest",
|
||||
"Repository": "ubuntu"
|
||||
},
|
||||
{
|
||||
"VirtualSize": 131506275,
|
||||
"Size": 131506275,
|
||||
"Created": 1365714795,
|
||||
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
|
||||
"Tag": "precise",
|
||||
"Repository": "ubuntu"
|
||||
},
|
||||
{
|
||||
"VirtualSize": 180116135,
|
||||
"Size": 24653,
|
||||
"Created": 1364102658,
|
||||
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
|
||||
"Tag": "12.10",
|
||||
"Repository": "ubuntu"
|
||||
},
|
||||
{
|
||||
"VirtualSize": 180116135,
|
||||
"Size": 24653,
|
||||
"Created": 1364102658,
|
||||
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
|
||||
"Tag": "quantal",
|
||||
"Repository": "ubuntu"
|
||||
}
|
||||
]
|
||||
|
||||
The returned json looks like this:
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
[
|
||||
{
|
||||
"RepoTags": [
|
||||
"ubuntu:12.04",
|
||||
"ubuntu:precise",
|
||||
"ubuntu:latest"
|
||||
],
|
||||
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
|
||||
"Created": 1365714795,
|
||||
"Size": 131506275,
|
||||
"VirtualSize": 131506275
|
||||
},
|
||||
{
|
||||
"RepoTags": [
|
||||
"ubuntu:12.10",
|
||||
"ubuntu:quantal"
|
||||
],
|
||||
"ParentId": "27cf784147099545",
|
||||
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
|
||||
"Created": 1364102658,
|
||||
"Size": 24653,
|
||||
"VirtualSize": 180116135
|
||||
}
|
||||
]
|
||||
|
||||
`GET /images/viz`
|
||||
|
||||
This URI no longer exists. The `images --viz`
|
||||
output is now generated in the client, using the
|
||||
`/images/json` data.
|
||||
|
||||
## v1.6
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.6*](/reference/api/docker_remote_api_v1.6/)
|
||||
|
||||
### What's new
|
||||
|
||||
`POST /containers/(id)/attach`
|
||||
|
||||
**New!**
|
||||
You can now split stderr from stdout. This is done by
|
||||
prefixing a header to each transmission. See
|
||||
[`POST /containers/(id)/attach`](
|
||||
/reference/api/docker_remote_api_v1.9/#attach-to-a-container "POST /containers/(id)/attach").
|
||||
The WebSocket attach is unchanged. Note that attach calls on the
previous API version didn't change: there, stdout and stderr remain merged.
|
||||
|
||||
## v1.5
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.5*](/reference/api/docker_remote_api_v1.5/)
|
||||
|
||||
### What's new
|
||||
|
||||
`POST /images/create`
|
||||
|
||||
**New!**
|
||||
You can now pass registry credentials (via an AuthConfig
|
||||
object) through the X-Registry-Auth header
|
||||
|
||||
`POST /images/(name)/push`
|
||||
|
||||
**New!**
|
||||
The AuthConfig object now needs to be passed through the
|
||||
X-Registry-Auth header
|
||||
|
||||
`GET /containers/json`
|
||||
|
||||
**New!**
|
||||
The format of the Ports entry has been changed to a list of
dicts, each containing `PublicPort`, `PrivatePort` and `Type`, describing a
port mapping.
|
||||
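For example, a container with one published TCP port is reported like this (the same shape used in the container listing examples later in these docs):

    "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}]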
|
||||
## v1.4
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.4*](/reference/api/docker_remote_api_v1.4/)
|
||||
|
||||
### What's new
|
||||
|
||||
`POST /images/create`
|
||||
|
||||
**New!**
|
||||
When pulling a repo, all images are now downloaded in parallel.
|
||||
|
||||
`GET /containers/(id)/top`
|
||||
|
||||
**New!**
|
||||
You can now use `ps` args with `docker top`, like `docker top <container_id> aux`.
|
||||
|
||||
`GET /events`
|
||||
|
||||
**New!**
|
||||
The image's name is now included in the events.
|
||||
|
||||
## v1.3
|
||||
|
||||
docker v0.5.0
|
||||
[51f6c4a](https://github.com/docker/docker/commit/51f6c4a7372450d164c61e0054daf0223ddbd909)
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.3*](/reference/api/docker_remote_api_v1.3/)
|
||||
|
||||
### What's new
|
||||
|
||||
`GET /containers/(id)/top`
|
||||
|
||||
List the processes running inside a container.
|
||||
|
||||
`GET /events`
|
||||
|
||||
**New!**
|
||||
Monitor docker's events via streaming or via polling
|
||||
|
||||
Builder (/build):
|
||||
|
||||
- Simplify the upload of the build context
|
||||
- Simply stream a tarball instead of a multipart upload with 4
intermediary buffers
|
||||
- Simpler, less memory usage, less disk usage and faster
|
||||
|
||||
> **Warning**:
|
||||
> The /build improvements are not reverse-compatible. Pre 1.3 clients will
|
||||
> break on /build.
|
||||
|
||||
List containers (/containers/json):
|
||||
|
||||
- You can use size=1 to get the size of the containers
|
||||
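For example (the host address is a placeholder):

    $ curl 'http://localhost:2375/containers/json?size=1'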
|
||||
Start containers (/containers/<id>/start):
|
||||
|
||||
- You can now pass host-specific configuration (e.g., bind mounts) in
|
||||
the POST body for start calls
|
||||
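A rough sketch of such a start call (the `Binds` host configuration key is shown for illustration; the paths, container ID, and daemon address are placeholders):

    $ curl -X POST -H "Content-Type: application/json" \
        -d '{"Binds": ["/host/logs:/var/log"]}' \
        http://localhost:2375/containers/4fa6e0f0c678/start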
|
||||
## v1.2
|
||||
|
||||
docker v0.4.2
|
||||
[2e7649b](https://github.com/docker/docker/commit/2e7649beda7c820793bd46766cbc2cfeace7b168)
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.2*](/reference/api/docker_remote_api_v1.2/)
|
||||
|
||||
### What's new
|
||||
|
||||
The auth configuration is now handled by the client.
|
||||
|
||||
The client should send its authConfig as POST on each call of
`/images/(name)/push`.
|
||||
|
||||
`GET /auth`
|
||||
|
||||
**Deprecated.**
|
||||
|
||||
`POST /auth`
|
||||
|
||||
Only checks the configuration but doesn't store it on the server
|
||||
|
||||
Deleting an image is now improved: it will only untag the image if it
has children, and remove all of the untagged parents if it has any.
|
||||
|
||||
`POST /images/<name>/delete`
|
||||
|
||||
Now returns a JSON structure with the list of images
|
||||
deleted/untagged.
|
||||
|
||||
## v1.1
|
||||
|
||||
docker v0.4.0
|
||||
[a8ae398](https://github.com/docker/docker/commit/a8ae398bf52e97148ee7bd0d5868de2e15bd297f)
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.1*](/reference/api/docker_remote_api_v1.1/)
|
||||
|
||||
### What's new
|
||||
|
||||
`POST /images/create`
|
||||
|
||||
`POST /images/(name)/insert`
|
||||
|
||||
`POST /images/(name)/push`
|
||||
|
||||
Uses a JSON stream instead of HTTP hijacking; it looks like this:
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
{"status":"Pushing..."}
|
||||
{"status":"Pushing", "progress":"1/? (n/a)"}
|
||||
{"error":"Invalid..."}
|
||||
...
|
||||
|
||||
## v1.0
|
||||
|
||||
docker v0.3.4
|
||||
[8d73740](https://github.com/docker/docker/commit/8d73740343778651c09160cde9661f5f387b36f4)
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.0*](/reference/api/docker_remote_api_v1.0/)
|
||||
|
||||
### What's new
|
||||
|
||||
Initial version
|
||||
|
||||
@@ -49,6 +49,11 @@ List containers
|
||||
"Created": 1367854155,
|
||||
"Status": "Exit 0",
|
||||
"Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
|
||||
"Labels": {
|
||||
"com.example.vendor": "Acme",
|
||||
"com.example.license": "GPL",
|
||||
"com.example.version": "1.0"
|
||||
},
|
||||
"SizeRw": 12288,
|
||||
"SizeRootFs": 0
|
||||
},
|
||||
@@ -60,6 +65,7 @@ List containers
|
||||
"Created": 1367854155,
|
||||
"Status": "Exit 0",
|
||||
"Ports": [],
|
||||
"Labels": {},
|
||||
"SizeRw": 12288,
|
||||
"SizeRootFs": 0
|
||||
},
|
||||
@@ -71,6 +77,7 @@ List containers
|
||||
"Created": 1367854154,
|
||||
"Status": "Exit 0",
|
||||
"Ports":[],
|
||||
"Labels": {},
|
||||
"SizeRw":12288,
|
||||
"SizeRootFs":0
|
||||
},
|
||||
@@ -82,6 +89,7 @@ List containers
|
||||
"Created": 1367854152,
|
||||
"Status": "Exit 0",
|
||||
"Ports": [],
|
||||
"Labels": {},
|
||||
"SizeRw": 12288,
|
||||
"SizeRootFs": 0
|
||||
}
|
||||
|
||||
@@ -51,6 +51,11 @@ List containers
|
||||
"Created": 1367854155,
|
||||
"Status": "Exit 0",
|
||||
"Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
|
||||
"Labels": {
|
||||
"com.example.vendor": "Acme",
|
||||
"com.example.license": "GPL",
|
||||
"com.example.version": "1.0"
|
||||
},
|
||||
"SizeRw": 12288,
|
||||
"SizeRootFs": 0
|
||||
},
|
||||
@@ -62,6 +67,7 @@ List containers
|
||||
"Created": 1367854155,
|
||||
"Status": "Exit 0",
|
||||
"Ports": [],
|
||||
"Labels": {},
|
||||
"SizeRw": 12288,
|
||||
"SizeRootFs": 0
|
||||
},
|
||||
@@ -73,6 +79,7 @@ List containers
|
||||
"Created": 1367854154,
|
||||
"Status": "Exit 0",
|
||||
"Ports":[],
|
||||
"Labels": {},
|
||||
"SizeRw":12288,
|
||||
"SizeRootFs":0
|
||||
},
|
||||
@@ -84,6 +91,7 @@ List containers
|
||||
"Created": 1367854152,
|
||||
"Status": "Exit 0",
|
||||
"Ports": [],
|
||||
"Labels": {},
|
||||
"SizeRw": 12288,
|
||||
"SizeRootFs": 0
|
||||
}
|
||||
|
||||
@@ -51,6 +51,11 @@ List containers
|
||||
"Created": 1367854155,
|
||||
"Status": "Exit 0",
|
||||
"Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
|
||||
"Labels": {
|
||||
"com.example.vendor": "Acme",
|
||||
"com.example.license": "GPL",
|
||||
"com.example.version": "1.0"
|
||||
},
|
||||
"SizeRw": 12288,
|
||||
"SizeRootFs": 0
|
||||
},
|
||||
@@ -62,6 +67,7 @@ List containers
|
||||
"Created": 1367854155,
|
||||
"Status": "Exit 0",
|
||||
"Ports": [],
|
||||
"Labels": {},
|
||||
"SizeRw": 12288,
|
||||
"SizeRootFs": 0
|
||||
},
|
||||
@@ -73,6 +79,7 @@ List containers
|
||||
"Created": 1367854154,
|
||||
"Status": "Exit 0",
|
||||
"Ports":[],
|
||||
"Labels": {},
|
||||
"SizeRw":12288,
|
||||
"SizeRootFs":0
|
||||
},
|
||||
@@ -84,6 +91,7 @@ List containers
|
||||
"Created": 1367854152,
|
||||
"Status": "Exit 0",
|
||||
"Ports": [],
|
||||
"Labels": {},
|
||||
"SizeRw": 12288,
|
||||
"SizeRootFs": 0
|
||||
}
|
||||
@@ -1109,7 +1117,7 @@ Query Parameters:
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/x-tar
|
||||
X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInBhdGgiOiIvcm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oifQ==
|
||||
X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0=
|
||||
|
||||
{{ TAR STREAM }}
|
||||
|
||||
@@ -1120,10 +1128,10 @@ JSON object (whitespace added for readability):
|
||||
|
||||
{
|
||||
"name": "root",
|
||||
"path": "/root",
|
||||
"size": 4096,
|
||||
"mode": 2147484096,
|
||||
"mtime": "2014-02-27T20:51:23Z"
|
||||
"mtime": "2014-02-27T20:51:23Z",
|
||||
"linkTarget": ""
|
||||
}
|
||||
|
||||
A `HEAD` request can also be made to this endpoint if only this information is
|
||||
|
||||
@@ -1,761 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "The Docker Hub and the Registry v1"
|
||||
description = "Documentation for docker Registry and Registry API"
|
||||
keywords = ["docker, registry, api, hub"]
|
||||
[menu.main]
|
||||
parent="smn_hub_ref"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# The Docker Hub and the Registry v1
|
||||
|
||||
## The three roles
|
||||
|
||||
There are three major components playing a role in the Docker ecosystem.
|
||||
|
||||
### Docker Hub
|
||||
|
||||
The Docker Hub is responsible for centralizing information about:
|
||||
|
||||
- User accounts
|
||||
- Checksums of the images
|
||||
- Public namespaces
|
||||
|
||||
The Docker Hub has different components:
|
||||
|
||||
- Web UI
|
||||
- Meta-data store (comments, stars, list public repositories)
|
||||
- Authentication service
|
||||
- Tokenization
|
||||
|
||||
The Docker Hub is authoritative for that information.
|
||||
|
||||
There is only one instance of the Docker Hub, run and
|
||||
managed by Docker Inc.
|
||||
|
||||
### Docker Registry 1.0
|
||||
|
||||
The 1.0 registry has the following characteristics:
|
||||
|
||||
- It stores the images and the graph for a set of repositories
|
||||
- It does not have user accounts data
|
||||
- It has no notion of user accounts or authorization
|
||||
- It delegates authentication and authorization to the Docker Hub Auth
|
||||
service using tokens
|
||||
- It supports different storage backends (S3, cloud files, local FS)
|
||||
- It doesn't have a local database
|
||||
- [Source Code](https://github.com/docker/docker-registry)
|
||||
|
||||
We expect that there will be multiple registries out there. To help you
|
||||
grasp the context, here are some examples of registries:
|
||||
|
||||
- **sponsor registry**: such a registry is provided by a third-party
|
||||
hosting infrastructure as a convenience for their customers and the
|
||||
Docker community as a whole. Its costs are supported by the third
|
||||
party, but the management and operation of the registry are
|
||||
supported by Docker, Inc. It features read/write access, and delegates
|
||||
authentication and authorization to the Docker Hub.
|
||||
- **mirror registry**: such a registry is provided by a third-party
|
||||
hosting infrastructure but is targeted at their customers only. Some
|
||||
mechanism (unspecified to date) ensures that public images are
|
||||
pulled from a sponsor registry to the mirror registry, to make sure
|
||||
that the customers of the third-party provider can `docker pull`
|
||||
those images locally.
|
||||
- **vendor registry**: such a registry is provided by a software
|
||||
vendor who wants to distribute docker images. It would be operated
|
||||
and managed by the vendor. Only users authorized by the vendor would
|
||||
be able to get write access. Some images would be public (accessible
|
||||
for anyone), others private (accessible only for authorized users).
|
||||
Authentication and authorization would be delegated to the Docker Hub.
|
||||
The goal of vendor registries is to let someone do `docker pull
|
||||
basho/riak1.3` and automatically push from the vendor registry
|
||||
(instead of a sponsor registry); i.e., vendors get all the convenience of a
|
||||
sponsor registry, while retaining control on the asset distribution.
|
||||
- **private registry**: such a registry is located behind a firewall,
|
||||
or protected by an additional security layer (HTTP authorization,
|
||||
SSL client-side certificates, IP address authorization...). The
|
||||
registry is operated by a private entity, outside of Docker's
|
||||
control. It can optionally delegate additional authorization to the
|
||||
Docker Hub, but it is not mandatory.
|
||||
|
||||
> **Note:** The latter implies that while HTTP is the protocol
|
||||
> of choice for a registry, multiple schemes are possible (and
|
||||
> in some cases, trivial):
|
||||
>
|
||||
> - HTTP with GET (and PUT for read-write registries);
|
||||
> - local mount point;
|
||||
> - remote docker addressed through SSH.
|
||||
|
||||
The latter would only require two new commands in Docker, e.g.,
|
||||
`registryget` and `registryput`,
|
||||
wrapping access to the local filesystem (and optionally doing
|
||||
consistency checks). Authentication and authorization are then delegated
|
||||
to SSH (e.g., with public keys).
|
||||
|
||||
### Docker
|
||||
|
||||
On top of being a runtime for LXC, Docker is the Registry client. It
|
||||
supports:
|
||||
|
||||
- Push / Pull on the registry
|
||||
- Client authentication on the Docker Hub
|
||||
|
||||
## Workflow
|
||||
|
||||
### Pull
|
||||
|
||||

|
||||
|
||||
1. Contact the Docker Hub to know where I should download “samalba/busybox”
|
||||
2. Docker Hub replies: a. `samalba/busybox` is on Registry A b. here are the
|
||||
checksums for `samalba/busybox` (for all layers) c. token
|
||||
3. Contact Registry A to receive the layers for `samalba/busybox` (all of
   them down to the base image). Registry A is authoritative for `samalba/busybox`
   but keeps a copy of all inherited layers and serves them all from the same
   location.
4. The registry contacts the Docker Hub to verify if the token/user is allowed to download images
5. The Docker Hub returns true/false, letting the registry know if it should proceed or error
   out
|
||||
6. Get the payload for all layers
|
||||
|
||||
It's possible to run:
|
||||
|
||||
$ docker pull https://<registry>/repositories/samalba/busybox
|
||||
|
||||
In this case, Docker bypasses the Docker Hub. However the security is not
|
||||
guaranteed (in case Registry A is corrupted) because there won't be any
|
||||
checksum checks.
|
||||
|
||||
Currently the registry redirects to S3 URLs for downloads; going forward, all
downloads need to be streamed through the registry. The Registry will
then abstract the calls to S3 behind a top-level class which implements
sub-classes for S3 and local storage.
|
||||
|
||||
A token is only returned when the `X-Docker-Token`
header is sent with the request.
|
||||
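In `curl` terms, the image-list request from the pull workflow below might be issued like this (the credentials and the Docker Hub hostname are placeholders):

    $ curl -u "myuser:mypass" -H "X-Docker-Token: true" \
        https://<docker hub>/v1/repositories/foo/bar/images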
|
||||
Basic Auth is required to pull private repos. Basic auth isn't required
|
||||
for pulling public repos, but if one is provided, it needs to be valid
|
||||
and for an active account.
|
||||
|
||||
**API (pulling repository foo/bar):**
|
||||
|
||||
1. (Docker -> Docker Hub) GET /v1/repositories/foo/bar/images:
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
|
||||
X-Docker-Token: true
|
||||
|
||||
**Action**:
|
||||
|
||||
(looking up the foo/bar in db and gets images and checksums
|
||||
for that repo (all if no tag is specified, if tag, only
|
||||
checksums for those tags) see part 4.4.1)
|
||||
|
||||
2. (Docker Hub -> Docker) HTTP 200 OK
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Token
|
||||
signature=123abc,repository=”foo/bar”,access=write
|
||||
X-Docker-Endpoints: registry.docker.io [,registry2.docker.io]
|
||||
|
||||
**Body**:
|
||||
|
||||
Jsonified checksums (see part 4.4.1)
|
||||
|
||||
3. (Docker -> Registry) GET /v1/repositories/foo/bar/tags/latest
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Token
|
||||
signature=123abc,repository=”foo/bar”,access=write
|
||||
|
||||
4. (Registry -> Docker Hub) GET /v1/repositories/foo/bar/images
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Token
|
||||
signature=123abc,repository=”foo/bar”,access=read
|
||||
|
||||
**Body**:
|
||||
|
||||
<ids and checksums in payload>
|
||||
|
||||
**Action**:
|
||||
|
||||
(Lookup token see if they have access to pull.)
|
||||
|
||||
If good:
|
||||
HTTP 200 OK Docker Hub will invalidate the token
|
||||
|
||||
If bad:
|
||||
HTTP 401 Unauthorized
|
||||
|
||||
5. (Docker -> Registry) GET /v1/images/928374982374/ancestry
|
||||
|
||||
**Action**:
|
||||
|
||||
(for each image id returned in the registry, fetch /json + /layer)
|
||||
|
||||
> **Note**:
|
||||
> If someone makes a second request, then we will always give a new token,
|
||||
> never reuse tokens.
|
||||
|
||||
### Push
|
||||
|
||||

|
||||
|
||||
1. Contact the Docker Hub to allocate the repository name “samalba/busybox”
|
||||
(authentication required with user credentials)
|
||||
2. If authentication works and namespace available, “samalba/busybox”
|
||||
is allocated and a temporary token is returned (namespace is marked
|
||||
as initialized in Docker Hub)
|
||||
3. Push the image on the registry (along with the token)
|
||||
4. Registry A contacts the Docker Hub to verify the token (the token must
   correspond to the repository name)
5. Docker Hub validates the token. Registry A starts reading the stream
   pushed by docker and stores the repository (with its images)
6. docker contacts the Docker Hub to give the checksums for the uploaded images
|
||||
|
||||
> **Note:**
|
||||
> **It's possible not to use the Docker Hub at all!** In this case, a standalone
> version of the Registry is deployed to store and serve images. Those
> images are not authenticated and the security is not guaranteed.
|
||||
|
||||
> **Note:**
|
||||
> **Docker Hub can be replaced!** For a privately deployed Registry, a custom
> Docker Hub can be used to serve and validate tokens according to different
> policies.
|
||||
|
||||
Docker computes the checksums and submits them to the Docker Hub at the end of
|
||||
the push. When a repository name does not have checksums on the Docker Hub,
|
||||
it means that the push is in progress (since checksums are submitted at
|
||||
the end).
|
||||
|
||||
**API (pushing repos foo/bar):**
|
||||
|
||||
1. (Docker -> Docker Hub) PUT /v1/repositories/foo/bar/
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Basic sdkjfskdjfhsdkjfh== X-Docker-Token:
|
||||
true
|
||||
|
||||
**Action**:
|
||||
|
||||
- in the Docker Hub, we allocate a new repository, and set it to
initialized
|
||||
|
||||
**Body**:
|
||||
|
||||
(The body contains the list of images that are going to be
|
||||
pushed, with empty checksums. The checksums will be set at
|
||||
the end of the push):
|
||||
|
||||
[{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”}]
|
||||
|
||||
2. (Docker Hub -> Docker) 200 Created
|
||||
|
||||
**Headers**:
|
||||
|
||||
WWW-Authenticate: Token
|
||||
signature=123abc,repository=”foo/bar”,access=write
|
||||
X-Docker-Endpoints: registry.docker.io [, registry2.docker.io]
|
||||
|
||||
3. (Docker -> Registry) PUT /v1/images/98765432_parent/json
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Token
|
||||
signature=123abc,repository=”foo/bar”,access=write
|
||||
|
||||
4. (Registry->Docker Hub) GET /v1/repositories/foo/bar/images
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Token
|
||||
signature=123abc,repository=”foo/bar”,access=write
|
||||
|
||||
**Action**:
|
||||
|
||||
- Docker Hub:
|
||||
will invalidate the token.
|
||||
- Registry:
|
||||
grants a session (if token is approved) and fetches
|
||||
the images id
|
||||
|
||||
5. (Docker -> Registry) PUT /v1/images/98765432_parent/json
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Token
|
||||
signature=123abc,repository=”foo/bar”,access=write
|
||||
Cookie: (Cookie provided by the Registry)
|
||||
|
||||
6. (Docker -> Registry) PUT /v1/images/98765432/json
|
||||
|
||||
**Headers**:
|
||||
|
||||
Cookie: (Cookie provided by the Registry)
|
||||
|
||||
7. (Docker -> Registry) PUT /v1/images/98765432_parent/layer
|
||||
|
||||
**Headers**:
|
||||
|
||||
Cookie: (Cookie provided by the Registry)
|
||||
|
||||
8. (Docker -> Registry) PUT /v1/images/98765432/layer
|
||||
|
||||
**Headers**:
|
||||
|
||||
X-Docker-Checksum: sha256:436745873465fdjkhdfjkgh
|
||||
|
||||
9. (Docker -> Registry) PUT /v1/repositories/foo/bar/tags/latest
|
||||
|
||||
**Headers**:
|
||||
|
||||
Cookie: (Cookie provided by the Registry)
|
||||
|
||||
**Body**:
|
||||
|
||||
“98765432”
|
||||
|
||||
10. (Docker -> Docker Hub) PUT /v1/repositories/foo/bar/images
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Basic 123oislifjsldfj== X-Docker-Endpoints:
|
||||
registry1.docker.io (no validation on this right now)
|
||||
|
||||
**Body**:
|
||||
|
||||
(The image, id`s, tags and checksums)
|
||||
[{“id”:
|
||||
“9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”,
|
||||
“checksum”:
|
||||
“b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”}]
|
||||
|
||||
**Return**:
|
||||
|
||||
HTTP 204
|
||||
|
||||
> **Note:** If the push fails and needs to start again, then on the Docker Hub side
> there will already be a record for the namespace/name, but it will be
> marked as initialized. Should we allow it, or mark the name as already used? One edge
> case could be if someone pushes the same thing at the same time with two
> different shells.
|
||||
|
||||
If it's a retry on the Registry, Docker has a cookie (provided by the
|
||||
registry after token validation). So the Docker Hub won't have to provide a
|
||||
new token.
|
||||
|
||||
### Delete
|
||||
|
||||
If you need to delete something from the Docker Hub or registry, we need a
|
||||
nice clean way to do that. Here is the workflow.
|
||||
|
||||
1. Docker contacts the Docker Hub to request a delete of a repository
|
||||
`samalba/busybox` (authentication required with user credentials)
|
||||
2. If authentication works and repository is valid, `samalba/busybox`
|
||||
is marked as deleted and a temporary token is returned
|
||||
3. Send a delete request to the registry for the repository (along with
|
||||
the token)
|
||||
4. Registry A contacts the Docker Hub to verify the token (token must
|
||||
corresponds to the repository name)
|
||||
5. Docker Hub validates the token. Registry A deletes the repository and
   everything associated with it.
6. docker contacts the Docker Hub to let it know the repository was removed from the
   registry; the Docker Hub then removes all records from the database.
|
||||
|
||||
> **Note**:
|
||||
> The Docker client should present an "Are you sure?" prompt to confirm
|
||||
> the deletion before starting the process. Once it starts it can't be
|
||||
> undone.
|
||||
|
||||
**API (deleting repository foo/bar):**
|
||||
|
||||
1. (Docker -> Docker Hub) DELETE /v1/repositories/foo/bar/
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Basic sdkjfskdjfhsdkjfh== X-Docker-Token:
|
||||
true
|
||||
|
||||
**Action**:
|
||||
|
||||
- in the Docker Hub, we make sure it is a valid repository, and set
it to deleted (logically)
|
||||
|
||||
**Body**:
|
||||
|
||||
Empty
|
||||
|
||||
2. (Docker Hub -> Docker) 202 Accepted
|
||||
|
||||
**Headers**:
|
||||
|
||||
WWW-Authenticate: Token
|
||||
signature=123abc,repository=”foo/bar”,access=delete
|
||||
X-Docker-Endpoints: registry.docker.io [, registry2.docker.io]
|
||||
# list of endpoints where this repo lives.
|
||||
|
||||
3. (Docker -> Registry) DELETE /v1/repositories/foo/bar/
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Token
|
||||
signature=123abc,repository=”foo/bar”,access=delete
|
||||
|
||||
4. (Registry->Docker Hub) PUT /v1/repositories/foo/bar/auth
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Token
|
||||
signature=123abc,repository=”foo/bar”,access=delete
|
||||
|
||||
**Action**:
|
||||
|
||||
- Docker Hub:
|
||||
will invalidate the token.
|
||||
- Registry:
|
||||
deletes the repository (if token is approved)
|
||||
|
||||
5. (Registry -> Docker) 200 OK
|
||||
|
||||
200 If success 403 if forbidden 400 if bad request 404
|
||||
if repository isn't found
|
||||
|
||||
6. (Docker -> Docker Hub) DELETE /v1/repositories/foo/bar/
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Basic 123oislifjsldfj== X-Docker-Endpoints:
|
||||
registry-1.docker.io (no validation on this right now)
|
||||
|
||||
**Body**:
|
||||
|
||||
Empty
|
||||
|
||||
**Return**:
|
||||
|
||||
HTTP 200
|
||||
|
||||
## How to use the Registry in standalone mode
|
||||
|
||||
The Docker Hub has two main purposes (along with its fancy social features):
|
||||
|
||||
- Resolve short names (to avoid passing absolute URLs all the time):
|
||||
|
||||
username/projectname ->
|
||||
https://registry.docker.io/users/<username>/repositories/<projectname>/
|
||||
team/projectname ->
|
||||
https://registry.docker.io/team/<team>/repositories/<projectname>/
|
||||
|
||||
- Authenticate a user as a repos owner (for a central referenced
|
||||
repository)
|
||||
|
||||
### Without a Docker Hub
|
||||
|
||||
Using the Registry without the Docker Hub can be useful to store the images
|
||||
on a private network without having to rely on an external entity
|
||||
controlled by Docker Inc.
|
||||
|
||||
In this case, the registry will be launched in a special mode
|
||||
(-standalone? ne? -no-index?). In this mode, the only thing which changes is
that the Registry will never contact the Docker Hub to verify a token. It will be
the Registry owner's responsibility to authenticate the user who pushes
(or even pulls) an image using any mechanism (HTTP auth, IP based,
etc...).
|
||||
|
||||
In this scenario, the Registry is responsible for the security in case
|
||||
of data corruption since the checksums are not delivered by a trusted
|
||||
entity.
|
||||
|
||||
As hinted previously, a standalone registry can also be implemented by
|
||||
any HTTP server handling GET/PUT requests (or even only GET requests if
|
||||
no write access is necessary).
|
||||
|
||||
### With a Docker Hub
|
||||
|
||||
The Docker Hub data needed by the Registry are simple:
|
||||
|
||||
- Serve the checksums
|
||||
- Provide and authorize a Token
|
||||
|
||||
In the scenario of a Registry running on a private network with the need
|
||||
of centralizing and authorizing, it's easy to use a custom Docker Hub.
|
||||
|
||||
The only challenge will be to tell Docker to contact (and trust) this
|
||||
custom Docker Hub. Docker will be configurable at some point to use a
specific Docker Hub; it'll be the private entity's responsibility (basically
the organization that uses Docker in a private environment) to maintain
the Docker Hub and Docker's configuration among its consumers.
|
||||
|
||||
## The API
|
||||
|
||||
The first version of the api is available here:
|
||||
[https://github.com/jpetazzo/docker/blob/acd51ecea8f5d3c02b00a08176171c59442df8b3/docs/images-repositories-push-pull.md](https://github.com/jpetazzo/docker/blob/acd51ecea8f5d3c02b00a08176171c59442df8b3/docs/images-repositories-push-pull.md)
|
||||
|
||||
### Images
|
||||
|
||||
The format returned in the images is not defined here (for layer and
|
||||
JSON), basically because Registry stores exactly the same kind of
|
||||
information as Docker uses to manage them.
|
||||
|
||||
The format of ancestry is a line-separated list of image ids, in age
|
||||
order, i.e. the image's parent is on the last line, the parent of the
|
||||
parent on the next-to-last line, etc.; if the image has no parent, the
|
||||
file is empty.
|
||||
|
||||
GET /v1/images/<image_id>/layer
|
||||
PUT /v1/images/<image_id>/layer
|
||||
GET /v1/images/<image_id>/json
|
||||
PUT /v1/images/<image_id>/json
|
||||
GET /v1/images/<image_id>/ancestry
|
||||
PUT /v1/images/<image_id>/ancestry
|
||||
|
||||
### Users
|
||||
|
||||
### Create a user (Docker Hub)
|
||||
|
||||
POST /v1/users:
|
||||
|
||||
**Body**:
|
||||
|
||||
{"email": "sam@docker.com",
"password": "toto42", "username": "foobar"}
|
||||
|
||||
**Validation**:
|
||||
|
||||
- **username**: min 4 characters, max 30 characters, must match the
regular expression `[a-z0-9_]`.
|
||||
- **password**: min 5 characters
|
||||
|
||||
**Valid**:
|
||||
|
||||
return HTTP 201
|
||||
|
||||
Errors: HTTP 400 (we should create error codes for possible errors) -
|
||||
invalid json - missing field - wrong format (username, password, email,
|
||||
etc) - forbidden name - name already exists
|
||||
|
||||
> **Note**:
|
||||
> A user account will be valid only if the email has been validated (a
|
||||
> validation link is sent to the email address).
|
||||
|
||||
### Update a user (Docker Hub)
|
||||
|
||||
PUT /v1/users/<username>
|
||||
|
||||
**Body**:
|
||||
|
||||
{"password": "toto"}
|
||||
|
||||
> **Note**:
|
||||
> The email address can also be updated; if it is, the user will need to reverify
> their new email address.
|
||||
|
||||
### Login (Docker Hub)
|
||||
|
||||
Does nothing but ask for user authentication. Can be used to
validate credentials. HTTP Basic Auth for now; this may change in the future.
|
||||
|
||||
GET /v1/users
|
||||
|
||||
**Return**:
|
||||
- Valid: HTTP 200
|
||||
- Invalid login: HTTP 401
|
||||
- Account inactive: HTTP 403 Account is not Active
|
||||
|
||||
### Tags (Registry)
|
||||
|
||||
The Registry does not know anything about users. Even though
repositories are under usernames, these are just namespaces for the
registry. This allows us to implement organizations or different namespaces
per user later, without modifying the Registry's API.
|
||||
|
||||
The following naming restrictions apply:
|
||||
|
||||
- Namespaces must match the same regular expression as usernames (See
|
||||
4.2.1.)
|
||||
- Repository names must match the regular expression [a-zA-Z0-9-_.]
|
||||
|
||||
### Get all tags:
|
||||
|
||||
GET /v1/repositories/<namespace>/<repository_name>/tags
|
||||
|
||||
**Return**: HTTP 200
|
||||
[
|
||||
{
|
||||
"layer": "9e89cc6f",
|
||||
"name": "latest"
|
||||
},
|
||||
{
|
||||
"layer": "b486531f",
|
||||
"name": "0.1.1",
|
||||
}
|
||||
]
|
||||
|
||||
**4.3.2 Read the content of a tag (resolve the image id):**
|
||||
|
||||
GET /v1/repositories/<namespace>/<repo_name>/tags/<tag>
|
||||
|
||||
**Return**:
|
||||
|
||||
"9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"
|
||||
|
||||
**4.3.3 Delete a tag (registry):**
|
||||
|
||||
DELETE /v1/repositories/<namespace>/<repo_name>/tags/<tag>
|
||||
|
||||
### 4.4 Images (Docker Hub)
|
||||
|
||||
For the Docker Hub to "resolve" the repository name to a Registry location,
it uses the `X-Docker-Endpoints` header. In other terms, these requests
always add an `X-Docker-Endpoints` header to indicate the
location of the registry which hosts this repository.
|
||||
|
||||
**4.4.1 Get the images:**
|
||||
|
||||
GET /v1/repositories/<namespace>/<repo_name>/images
|
||||
|
||||
**Return**: HTTP 200
|
||||
[{“id”:
|
||||
“9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”,
|
||||
“checksum”:
|
||||
“[md5:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087](md5:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087)”}]
|
||||
|
||||
### Add/update the images:
|
||||
|
||||
You always add images, you never remove them.
|
||||
|
||||
PUT /v1/repositories/<namespace>/<repo_name>/images
|
||||
|
||||
**Body**:
|
||||
|
||||
[ {“id”:
|
||||
“9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”,
|
||||
“checksum”:
|
||||
“sha256:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”}
|
||||
]
|
||||
|
||||
**Return**:
|
||||
|
||||
204
|
||||
|
||||
### Repositories
|
||||
|
||||
### Remove a Repository (Registry)
|
||||
|
||||
DELETE /v1/repositories/<namespace>/<repo_name>
|
||||
|
||||
Return 200 OK
|
||||
|
||||
### Remove a Repository (Docker Hub)
|
||||
|
||||
This starts the delete process. see 2.3 for more details.
|
||||
|
||||
DELETE /v1/repositories/<namespace>/<repo_name>
|
||||
|
||||
Return 202 OK
|
||||
|
||||
## Chaining Registries
|
||||
|
||||
It's possible to chain Registry servers for several reasons:
|
||||
|
||||
- Load balancing
|
||||
- Delegate the next request to another server
|
||||
|
||||
When a Registry is a reference for a repository, it should host the
|
||||
entire image chain in order to avoid breaking the chain during the
|
||||
download.
|
||||
|
||||
The Docker Hub and Registry use this mechanism to redirect on one or the
|
||||
other.
|
||||
|
||||
Example with an image download:
|
||||
|
||||
On every request, a special header can be returned:
|
||||
|
||||
X-Docker-Endpoints: server1,server2
|
||||
|
||||
On the next request, the client will always pick a server from this
|
||||
list.
|
||||
|
||||
## Authentication and authorization
|
||||
|
||||
### On the Docker Hub
|
||||
|
||||
The Docker Hub supports both “Basic” and “Token” challenges. Usually when
|
||||
there is a `401 Unauthorized`, the Docker Hub replies
|
||||
this:
|
||||
|
||||
401 Unauthorized
|
||||
WWW-Authenticate: Basic realm="auth required",Token
|
||||
|
||||
You have 3 options:
|
||||
|
||||
1. Provide user credentials and ask for a token
|
||||
|
||||
**Header**:
|
||||
|
||||
Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
|
||||
X-Docker-Token: true
|
||||
|
||||
In this case, along with the 200 response, you'll get a new token
(if user auth is ok). If authorization isn't correct you get a 401
response. If the account isn't active you will get a 403 response.
|
||||
|
||||
**Response**:
|
||||
|
||||
200 OK
|
||||
X-Docker-Token: Token
|
||||
signature=123abc,repository=”foo/bar”,access=read
|
||||
|
||||
|
||||
2. Provide user credentials only
|
||||
|
||||
**Header**:
|
||||
|
||||
Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
|
||||
|
||||
3. Provide Token
|
||||
|
||||
**Header**:
|
||||
|
||||
Authorization: Token
|
||||
signature=123abc,repository=”foo/bar”,access=read
|
||||
|
||||
### 6.2 On the Registry
|
||||
|
||||
The Registry only supports the Token challenge:
|
||||
|
||||
401 Unauthorized
|
||||
WWW-Authenticate: Token
|
||||
|
||||
The only way is to provide a token on `401 Unauthorized`
|
||||
responses:
|
||||
|
||||
Authorization: Token signature=123abc,repository="foo/bar",access=read
|
||||
|
||||
Usually, the Registry provides a Cookie when a Token verification
succeeds. Every time the Registry passes a Cookie, you have to pass
the same cookie back:
|
||||
|
||||
200 OK
|
||||
Set-Cookie: session="wD/J7LqL5ctqw8haL10vgfhrb2Q=?foo=UydiYXInCnAxCi4=&timestamp=RjEzNjYzMTQ5NDcuNDc0NjQzCi4="; Path=/; HttpOnly
|
||||
|
||||
Next request:
|
||||
|
||||
GET /(...)
|
||||
Cookie: session="wD/J7LqL5ctqw8haL10vgfhrb2Q=?foo=UydiYXInCnAxCi4=&timestamp=RjEzNjYzMTQ5NDcuNDc0NjQzCi4="
|
||||
|
||||
## Document version
|
||||
|
||||
- 1.0 : May 6th 2013 : initial release
|
||||
- 1.1 : June 1st 2013 : Added Delete Repository and way to handle new
|
||||
source namespace.
|
||||
|
||||
@@ -114,12 +114,6 @@ images.
|
||||
|
||||
### Environment replacement
|
||||
|
||||
> **Note**: prior to 1.3, `Dockerfile` environment variables were handled
|
||||
> similarly, in that they would be replaced as described below. However, there
|
||||
> was no formal definition as to which instructions handled environment
|
||||
> replacement at the time. After 1.3 this behavior will be preserved and
|
||||
> canonical.
|
||||
|
||||
Environment variables (declared with [the `ENV` statement](#env)) can also be
|
||||
used in certain instructions as variables to be interpreted by the
|
||||
`Dockerfile`. Escapes are also handled for including variable-like syntax
|
||||
|
||||
@@ -10,17 +10,13 @@ parent = "smn_cli"
|
||||
|
||||
# Using the command line
|
||||
|
||||
> **Note:** If you are using a remote Docker daemon, such as Boot2Docker,
|
||||
> then _do not_ type the `sudo` before the `docker` commands shown in the
|
||||
> documentation's examples.
|
||||
|
||||
To list available commands, either run `docker` with no parameters
|
||||
or execute `docker help`:
|
||||
|
||||
$ docker
|
||||
Usage: docker [OPTIONS] COMMAND [arg...]
|
||||
docker daemon [ --help | ... ]
|
||||
docker [ -h | --help | -v | --version ]
|
||||
docker [ --help | -v | --version ]
|
||||
|
||||
-H, --host=[]: The socket(s) to bind to in daemon mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.
|
||||
|
||||
|
||||
@@ -32,7 +32,7 @@ parent = "smn_cli"
|
||||
-G, --group="docker" Group for the unix socket
|
||||
-g, --graph="/var/lib/docker" Root of the Docker runtime
|
||||
-H, --host=[] Daemon socket(s) to connect to
|
||||
-h, --help=false Print usage
|
||||
--help=false Print usage
|
||||
--icc=true Enable inter-container communication
|
||||
--insecure-registry=[] Enable insecure registry communication
|
||||
--ip=0.0.0.0 Default IP when binding container ports
|
||||
|
||||
41
docs/security/apparmor.md
Normal file
@@ -0,0 +1,41 @@
|
||||
AppArmor security profiles for Docker
|
||||
--------------------------------------
|
||||
|
||||
AppArmor (Application Armor) is a security module that allows a system
|
||||
administrator to associate a security profile with each program. Docker
|
||||
expects to find an AppArmor policy loaded and enforced.
|
||||
|
||||
Container profiles are loaded automatically by Docker. A profile
|
||||
for the Docker Engine itself also exists and is installed
|
||||
with the official *.deb* packages. Advanced users and package
|
||||
managers may find the profile for */usr/bin/docker* underneath
|
||||
[contrib/apparmor](https://github.com/docker/docker/tree/master/contrib/apparmor)
|
||||
in the Docker Engine source repository.
|
||||
|
||||
|
||||
Understand the policies
|
||||
------------------------
|
||||
|
||||
The `docker-default` profile is the default for running
containers. It is moderately protective while
providing wide application compatibility.
|
||||
|
||||
The system's standard `unconfined` profile inherits all
|
||||
system-wide policies, applying path-based policies
|
||||
intended for the host system inside of containers.
|
||||
This was the default for privileged containers
|
||||
prior to Docker 1.8.
|
||||
|
||||
|
||||
Overriding the profile for a container
|
||||
---------------------------------------
|
||||
|
||||
Users may override the AppArmor profile using the
`--security-opt` option (per-container).
|
||||
|
||||
For example, the following explicitly specifies the default policy:
|
||||
|
||||
```
|
||||
$ docker run --rm -it --security-opt apparmor:docker-default hello-world
|
||||
```
|
||||
|
||||
291
docs/security/trust/content_trust.md
Normal file
@@ -0,0 +1,291 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Content trust in Docker"
|
||||
description = "Enabling content trust in Docker"
|
||||
keywords = ["content, trust, security, docker, documentation"]
|
||||
[menu.main]
|
||||
parent= "smn_content_trust"
|
||||
weight=-1
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# Content trust in Docker
|
||||
|
||||
When transferring data among networked systems, *trust* is a central concern. In
|
||||
particular, when communicating over an untrusted medium such as the internet, it
|
||||
is critical to ensure the integrity and publisher of all the data a system
|
||||
operates on. You use Docker to push and pull images (data) to a registry. Content trust
|
||||
gives you the ability to both verify the integrity and the publisher of all the
|
||||
data received from a registry over any channel.
|
||||
|
||||
Content trust is currently only available for users of the public Docker Hub. It
|
||||
is currently not available for the Docker Trusted Registry or for private
|
||||
registries.
|
||||
|
||||
## Understand trust in Docker
|
||||
|
||||
Content trust allows operations with a remote Docker registry to enforce
|
||||
client-side signing and verification of image tags. Content trust provides the
|
||||
ability to use digital signatures for data sent to and received from remote
|
||||
Docker registries. These signatures allow client-side verification of the
|
||||
integrity and publisher of specific image tags.
|
||||
|
||||
Currently, content trust is disabled by default. You must enable it by setting
the `DOCKER_CONTENT_TRUST` environment variable.
|
||||
|
||||
Once content trust is enabled, image publishers can sign their images. Image consumers can
ensure that the images they use are signed. Publishers and consumers can be
individuals or organizations. Docker's content trust supports both users and
automated processes such as builds.
|
||||
|
||||
### Image tags and content trust
|
||||
|
||||
An individual image record has the following identifier:
|
||||
|
||||
```
|
||||
[REGISTRY_HOST[:REGISTRY_PORT]/]REPOSITORY[:TAG]
|
||||
```
|
||||
|
||||
A particular image `REPOSITORY` can have multiple tags. For example, `latest` and
|
||||
`3.1.2` are both tags on the `mongo` image. An image publisher can build an image
|
||||
and tag combination many times changing the image with each build.
|
||||
|
||||
Content trust is associated with the `TAG` portion of an image. Each image
|
||||
repository has a set of keys that image publishers use to sign an image tag.
|
||||
Image publishers have discretion on which tags they sign.
|
||||
|
||||
An image repository can contain an image with one tag that is signed and another
|
||||
tag that is not. For example, consider [the Mongo image
|
||||
repository](https://hub.docker.com/r/library/mongo/tags/). The `latest`
|
||||
tag could be unsigned while the `3.1.6` tag could be signed. It is the
|
||||
responsibility of the image publisher to decide if an image tag is signed or
|
||||
not. In this representation, some image tags are signed, others are not:
|
||||
|
||||

|
||||
|
||||
Publishers can choose to sign a specific tag or not. As a result, the content of
|
||||
an unsigned tag and that of a signed tag with the same name may not match. For
|
||||
example, a publisher can push a tagged image `someimage:latest` and sign it.
|
||||
Later, the same publisher can push an unsigned `someimage:latest` image. This second
|
||||
push replaces the last unsigned tag `latest` but does not affect the signed `latest` version.
|
||||
The ability to choose which tags they can sign allows publishers to iterate over
|
||||
the unsigned version of an image before officially signing it.
|
||||
|
||||
Image consumers can enable content trust to ensure that images they use were
|
||||
signed. If a consumer enables content trust, they can only pull, run, or build
|
||||
with trusted images. Enabling content trust is like wearing a pair of
|
||||
rose-colored glasses. Consumers "see" only signed image tags, and the less
|
||||
desirable, unsigned image tags are "invisible" to them.
|
||||
|
||||

|
||||
|
||||
To a consumer who has not enabled content trust, nothing about how they
|
||||
work with Docker images changes. Every image is visible regardless of whether it
|
||||
is signed or not.
|
||||
|
||||
|
||||
### Content trust operations and keys
|
||||
|
||||
When content trust is enabled, `docker` CLI commands that operate on tagged images must
|
||||
either have content signatures or explicit content hashes. The commands that
|
||||
operate with content trust are:
|
||||
|
||||
* `push`
|
||||
* `build`
|
||||
* `create`
|
||||
* `pull`
|
||||
* `run`
|
||||
|
||||
For example, with content trust enabled a `docker pull someimage:latest` only
|
||||
succeeds if `someimage:latest` is signed. However, an operation with an explicit
|
||||
content hash always succeeds as long as the hash exists:
|
||||
|
||||
```bash
|
||||
$ docker pull someimage@sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a
|
||||
```
|
||||
|
||||
Trust for an image tag is managed through the use of signing keys. Docker's content
|
||||
trust makes use of four different keys:
|
||||
|
||||
| Key | Description |
|
||||
|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| offline key | Root of content trust for an image tag. When content trust is enabled, you create the offline key once. |
|
||||
| target and snapshot | These two keys are known together as the "tagging" key. When content trust is enabled, you create this key when you add a new image repository. If you have the offline key, you can export the tagging key and allow other publishers to sign the image tags. |
|
||||
| timestamp | This key applies to a repository. It allows Docker repositories to have freshness security guarantees without requiring periodic content refreshes on the client's side. |
|
||||
|
||||
With the exception of the timestamp, all the keys are generated and stored locally
|
||||
client-side. The timestamp is safely generated and stored in a signing server that
|
||||
is deployed alongside the Docker registry. All keys are generated in a backend
|
||||
service that isn't directly exposed to the internet and are encrypted at rest.
|
||||
|
||||
The following image depicts the various signing keys and their relationships:
|
||||
|
||||

|
||||
|
||||
>**WARNING**: Loss of the offline key is **very difficult** to recover from.
|
||||
>Correcting this loss requires intervention from [Docker
|
||||
>Support](https://support.docker.com) to reset the repository state. This loss
|
||||
>also requires **manual intervention** from every consumer that used a signed
|
||||
>tag from this repository prior to the loss.
|
||||
|
||||
You should back up the offline key somewhere safe. Given that it is only required
to create new repositories, it is a good idea to store it offline. Make sure you
read the [Manage keys for content trust](/security/trust/trust_key_mng) information
for details on creating, securing, and backing up your keys.
|
||||
|
||||
## Survey of typical content trust operations
|
||||
|
||||
This section surveys the typical trusted operations users perform with Docker
|
||||
images.
|
||||
|
||||
### Enable content trust
|
||||
|
||||
Enable content trust by setting the `DOCKER_CONTENT_TRUST` environment variable.
|
||||
Enabling per-shell is useful because you can have one shell configured for
|
||||
trusted operations and another terminal shell for untrusted operations. You can
|
||||
also add this declaration to your shell profile to have it always turned on by
default.
|
||||
|
||||
To enable content trust in a `bash` shell enter the following command:
|
||||
|
||||
```bash
|
||||
export DOCKER_CONTENT_TRUST=1
|
||||
```
|
||||
|
||||
Once set, each of the "tag" operations requires a key for the trusted tag. All of these
commands also support the `--disable-content-trust` flag. This flag allows
publishers to run individual operations on tagged images without content trust, on an
as-needed basis.
|
||||
|
||||
|
||||
### Push trusted content
|
||||
|
||||
To create signed content for a specific image tag, simply enable content trust and push
|
||||
a tagged image. If this is the first time you have pushed an image using content trust
|
||||
on your system, the session looks like this:
|
||||
|
||||
```bash
|
||||
$ docker push docker/trusttest:latest
|
||||
The push refers to a repository [docker.io/docker/trusttest] (len: 1)
|
||||
9a61b6b1315e: Image already exists
|
||||
902b87aaaec9: Image already exists
|
||||
latest: digest: sha256:d02adacee0ac7a5be140adb94fa1dae64f4e71a68696e7f8e7cbf9db8dd49418 size: 3220
|
||||
Signing and pushing trust metadata
|
||||
You are about to create a new offline signing key passphrase. This passphrase
|
||||
will be used to protect the most sensitive key in your signing system. Please
|
||||
choose a long, complex passphrase and be careful to keep the password and the
|
||||
key file itself secure and backed up. It is highly recommended that you use a
|
||||
password manager to generate the passphrase and keep it safe. There will be no
|
||||
way to recover this key. You can find the key in your config directory.
|
||||
Enter passphrase for new offline key with id a1d96fb:
|
||||
Repeat passphrase for new offline key with id a1d96fb:
|
||||
Enter passphrase for new tagging key with id docker.io/docker/trusttest (3a932f1):
|
||||
Repeat passphrase for new tagging key with id docker.io/docker/trusttest (3a932f1):
|
||||
Finished initializing "docker.io/docker/trusttest"
|
||||
```
|
||||
When you push your first tagged image with content trust enabled, the `docker` client
|
||||
recognizes this is your first push and:
|
||||
|
||||
- alerts you that it will create a new offline key
|
||||
- requests a passphrase for the key
|
||||
- generates an offline key in the `~/.docker/trust` directory
|
||||
- generates a tagging key for the repository in the `~/.docker/trust` directory
|
||||
|
||||
The passphrase you chose for both the offline key and your content key-pair should
|
||||
be randomly generated and stored in a *password manager*.
|
||||
|
||||
It is important to note that if you leave off the `latest` tag, content trust is skipped.
This is true even if content trust is enabled and even if this is your first push.
|
||||
|
||||
```bash
|
||||
$ docker push docker/trusttest
|
||||
The push refers to a repository [docker.io/docker/trusttest] (len: 1)
|
||||
9a61b6b1315e: Image successfully pushed
|
||||
902b87aaaec9: Image successfully pushed
|
||||
latest: digest: sha256:a9a9c4402604b703bed1c847f6d85faac97686e48c579bd9c3b0fa6694a398fc size: 3220
|
||||
No tag specified, skipping trust metadata push
|
||||
```
|
||||
|
||||
It is skipped because, as the message states, you did not supply an image `TAG`
|
||||
value. In Docker content trust, signatures are associated with tags.
|
||||
|
||||
Once you have an offline key on your system, subsequent image repositories
|
||||
you create can use that same offline key:
|
||||
|
||||
```bash
|
||||
$ docker push docker.io/docker/seaside:latest
|
||||
The push refers to a repository [docker.io/docker/seaside] (len: 1)
|
||||
a9539b34a6ab: Image successfully pushed
|
||||
b3dbab3810fc: Image successfully pushed
|
||||
latest: digest: sha256:d2ba1e603661a59940bfad7072eba698b79a8b20ccbb4e3bfb6f9e367ea43939 size: 3346
|
||||
Signing and pushing trust metadata
|
||||
Enter key passphrase for offline key with id a1d96fb:
|
||||
Enter passphrase for new tagging key with id docker.io/docker/seaside (bb045e3):
|
||||
Repeat passphrase for new tagging key with id docker.io/docker/seaside (bb045e3):
|
||||
Finished initializing "docker.io/docker/seaside"
|
||||
```
|
||||
|
||||
The new image has its own tagging key and timestamp key. The `latest` tag is signed with both of
|
||||
these.
|
||||
|
||||
|
||||
### Pull image content
|
||||
|
||||
A common way to consume an image is to `pull` it. With content trust enabled, the Docker
|
||||
client only allows `docker pull` to retrieve signed images.
|
||||
|
||||
```
|
||||
$ docker pull docker/seaside
|
||||
Using default tag: latest
|
||||
Pull (1 of 1): docker/trusttest:latest@sha256:d149ab53f871
|
||||
...
|
||||
Tagging docker/trusttest@sha256:d149ab53f871 as docker/trusttest:latest
|
||||
```
|
||||
|
||||
The `seaside:latest` image is signed. In the following example, the command does not specify a tag, so the system uses
|
||||
the `latest` tag by default again and the `docker/cliffs:latest` tag is not signed.
|
||||
|
||||
```bash
|
||||
$ docker pull docker/cliffs
|
||||
Using default tag: latest
|
||||
no trust data available
|
||||
```
|
||||
|
||||
Because the tag `docker/cliffs:latest` is not trusted, the `pull` fails.
|
||||
|
||||
|
||||
### Disable content trust for specific operations
|
||||
|
||||
A user that wants to disable content trust for a particular operation can use the
|
||||
`--disable-content-trust` flag. **Warning: this flag disables content trust for
|
||||
this operation**. With this flag, Docker will ignore content trust and allow all
|
||||
operations to be done without verifying any signatures. If we wanted the
|
||||
previous untrusted build to succeed we could do:
|
||||
|
||||
```
|
||||
$ cat Dockerfile
|
||||
FROM docker/trusttest:notrust
|
||||
RUN echo
|
||||
$ docker build --disable-content-trust -t docker/trusttest:testing .
|
||||
Sending build context to Docker daemon 42.84 MB
|
||||
...
|
||||
Successfully built f21b872447dc
|
||||
```
|
||||
|
||||
The same is true for all the other commands, such as `pull` and `push`:
|
||||
|
||||
```
|
||||
$ docker pull --disable-content-trust docker/trusttest:untrusted
|
||||
...
|
||||
$ docker push --disable-content-trust docker/trusttest:untrusted
|
||||
...
|
||||
```
|
||||
|
||||
## Related information
|
||||
|
||||
* [Manage keys for content trust](/security/trust/trust_key_mng)
|
||||
* [Automation with content trust](/security/trust/trust_automation)
|
||||
* [Play in a content trust sandbox](/security/trust/trust_sandbox)
|
||||
|
||||
|
||||
|
||||
BIN
docs/security/trust/images/tag_signing.png
Normal file
Binary file not shown. (After Width: | Height: | Size: 73 KiB)
1
docs/security/trust/images/trust_.gliffy
Normal file
File diff suppressed because one or more lines are too long
1
docs/security/trust/images/trust_components.gliffy
Normal file
File diff suppressed because one or more lines are too long
BIN
docs/security/trust/images/trust_components.png
Normal file
Binary file not shown. (After Width: | Height: | Size: 121 KiB)
1
docs/security/trust/images/trust_signing.gliffy
Normal file
File diff suppressed because one or more lines are too long
BIN
docs/security/trust/images/trust_signing.png
Normal file
Binary file not shown. (After Width: | Height: | Size: 70 KiB)
1
docs/security/trust/images/trust_view.gliffy
Normal file
File diff suppressed because one or more lines are too long
BIN
docs/security/trust/images/trust_view.png
Normal file
Binary file not shown. (After Width: | Height: | Size: 58 KiB)
21
docs/security/trust/index.md
Normal file
@@ -0,0 +1,21 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Use trusted images"
|
||||
description = "Use trusted images"
|
||||
keywords = ["trust, security, docker, index"]
|
||||
[menu.main]
|
||||
identifier="smn_content_trust"
|
||||
parent= "mn_docker_hub"
|
||||
weight=4
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# Use trusted images
|
||||
|
||||
The following topics are available:
|
||||
|
||||
* [Content trust in Docker](/security/trust/content_trust)
|
||||
* [Manage keys for content trust](/security/trust/trust_key_mng)
|
||||
* [Automation with content trust](/security/trust/trust_automation)
|
||||
* [Play in a content trust sandbox](/security/trust/trust_sandbox)
|
||||
|
||||
79
docs/security/trust/trust_automation.md
Normal file
@@ -0,0 +1,79 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Automation with content trust"
|
||||
description = "Automating content push pulls with trust"
|
||||
keywords = ["trust, security, docker, documentation, automation"]
|
||||
[menu.main]
|
||||
parent= "smn_content_trust"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# Automation with content trust
|
||||
|
||||
Your automation systems that pull or build images can also work with trust. Any automation environment must set `DOCKER_CONTENT_TRUST` either manually or in a scripted fashion before processing images.
|
||||
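For example, a build script for such an environment might enable content trust before any image operations. This is a minimal sketch; the repository name `example/app` is a placeholder, not part of this guide:

```bash
#!/bin/sh
# Sketch of an automation step: enable content trust for every docker
# command that follows in this script.
set -e
export DOCKER_CONTENT_TRUST=1

# These commands now refuse to operate on tags that have no trust data.
docker pull example/app:latest
docker build -t example/app:candidate .

# Signing a push also needs the passphrase variables described in the
# next section.
docker push example/app:candidate
```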
|
||||
## Bypass requests for passphrases
|
||||
|
||||
To allow tools to wrap docker and push trusted content, there are two
|
||||
environment variables that allow you to provide the passphrases without an
|
||||
`expect` script or typing them in:
|
||||
|
||||
- `DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE`
|
||||
- `DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE`
|
||||
|
||||
Docker attempts to use the contents of these environment variables as the passphrase
|
||||
for the keys. For example, an image publisher can export the repository `target`
|
||||
and `snapshot` passphrases:
|
||||
|
||||
```bash
|
||||
$ export DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE="u7pEQcGoebUHm6LHe6"
|
||||
$ export DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE="l7pEQcTKJjUHm6Lpe4"
|
||||
```
|
||||
|
||||
Then, when you push a new tag, the Docker client does not request these values but signs automatically:
|
||||
|
||||
```bash
|
||||
$ docker push docker/trusttest:latest
|
||||
The push refers to a repository [docker.io/docker/trusttest] (len: 1)
|
||||
a9539b34a6ab: Image already exists
|
||||
b3dbab3810fc: Image already exists
|
||||
latest: digest: sha256:d149ab53f871 size: 3355
|
||||
Signing and pushing trust metadata
|
||||
```
|
||||
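If you prefer not to leave the passphrases exported in your shell, you can scope them to a single command instead. This is ordinary shell behaviour rather than a Docker feature, shown here as a sketch with the example passphrases from above:

```bash
# The variables apply only to this one push and are not kept in the environment.
DOCKER_CONTENT_TRUST=1 \
DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE="u7pEQcGoebUHm6LHe6" \
DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE="l7pEQcTKJjUHm6Lpe4" \
docker push docker/trusttest:latest
```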
|
||||
## Building with content trust
|
||||
|
||||
You can also build with content trust. Before running the `docker build` command, you should set the environment variable `DOCKER_CONTENT_TRUST` either manually or in a scripted fashion. Consider the simple Dockerfile below.
|
||||
|
||||
```Dockerfile
|
||||
FROM docker/trusttest:latest
|
||||
RUN echo
|
||||
```
|
||||
|
||||
The `FROM` tag is pulling a signed image. You cannot build an image that has a
|
||||
`FROM` that is not either present locally or signed. Given that content trust
|
||||
data exists for the tag `latest`, the following build should succeed:
|
||||
|
||||
```bash
|
||||
$ docker build -t docker/trusttest:testing .
|
||||
Using default tag: latest
|
||||
latest: Pulling from docker/trusttest
|
||||
|
||||
b3dbab3810fc: Pull complete
|
||||
a9539b34a6ab: Pull complete
|
||||
Digest: sha256:d149ab53f871
|
||||
```
|
||||
|
||||
If content trust is enabled, building from a Dockerfile that relies on a tag without trust data (such as `docker/trusttest:notrust`) causes the build command to fail:
|
||||
|
||||
```bash
|
||||
$ docker build -t docker/trusttest:testing .
|
||||
unable to process Dockerfile: No trust data for notrust
|
||||
```
|
||||
|
||||
## Related information
|
||||
|
||||
* [Content trust in Docker](/security/trust/content_trust)
|
||||
* [Manage keys for content trust](/security/trust/trust_key_mng)
|
||||
* [Play in a content trust sandbox](/security/trust/trust_sandbox)
|
||||
|
||||
74
docs/security/trust/trust_key_mng.md
Normal file
@@ -0,0 +1,74 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Manage keys for content trust"
|
||||
description = "Manage keys for content trust"
|
||||
keywords = ["trust, security, root, keys, repository"]
|
||||
[menu.main]
|
||||
parent= "smn_content_trust"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# Manage keys for content trust
|
||||
|
||||
Trust for an image tag is managed through the use of keys. Docker's content
|
||||
trust makes use of four different keys:
|
||||
|
||||
| Key | Description |
|
||||
|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| offline key | Root of content trust for an image tag. When content trust is enabled, you create the offline key once. |
|
||||
| target and snapshot | These two keys are known together as the "tagging" key. When content trust is enabled, you create this key when you add a new image repository. If you have the offline key, you can export the tagging key and allow other publishers to sign the image tags. |
|
||||
| timestamp | This key applies to a repository. It allows Docker repositories to have freshness security guarantees without requiring periodic content refreshes on the client's side. |
|
||||
|
||||
With the exception of the timestamp key, all the keys are generated and stored locally
on the client. The timestamp key is safely generated and stored in a signing server that
is deployed alongside the Docker registry. The signing server's keys are generated in a
backend service that isn't directly exposed to the internet and are encrypted at rest.
|
||||
|
||||
## Choosing a passphrase
|
||||
|
||||
The passphrases you choose for both the offline key and your tagging key should
be randomly generated and stored in a password manager. Having the tagging key
allows users to sign image tags on a repository. Passphrases are used to encrypt
your keys at rest and ensure that a lost laptop or an unintended backup doesn't
put the private key material at risk.
|
||||
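One possible way to generate such a passphrase is sketched below; `openssl` is only an example tool here, and any password manager or generator works equally well:

```bash
# Generate a random 32-character passphrase and store it in your password manager.
openssl rand -base64 24
```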
|
||||
## Back up your keys
|
||||
|
||||
All the Docker trust keys are stored encrypted using the passphrase you provide
|
||||
on creation. Even so, you should still take care of the location where you back them up.
|
||||
Good practice is to create two encrypted USB keys.
|
||||
|
||||
It is very important that you back up your keys to a safe, secure location. Loss
|
||||
of the tagging key is recoverable; loss of the offline key is not.
|
||||
|
||||
The Docker client stores the keys in the `~/.docker/trust/private` directory.
|
||||
Before backing them up, you should `tar` them into an archive:
|
||||
|
||||
```bash
|
||||
$ tar -zcvf private_keys_backup.tar.gz ~/.docker/trust/private
|
||||
$ chmod 600 private_keys_backup.tar.gz
|
||||
```
|
||||
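To restore the keys from that archive on another machine, you might reverse the operation. This sketch assumes GNU or BSD `tar`, which strip the leading `/` when creating the archive, so extracting from `/` recreates the original `~/.docker/trust/private` path:

```bash
$ tar -zxvf private_keys_backup.tar.gz -C /
```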
|
||||
## Lost keys
|
||||
|
||||
If you lose your keys, you lose the ability to sign trusted content for
your repositories. If this happens, contact [Docker
Support](https://support.docker.com) (support@docker.com) to reset the repository
state.
|
||||
|
||||
This loss also requires **manual intervention** from every consumer that pulled
|
||||
the tagged image prior to the loss. Image consumers would get an error for
|
||||
content that they already downloaded:
|
||||
|
||||
```
|
||||
could not validate the path to a trusted root: failed to validate data with current trusted certificates
|
||||
```
|
||||
|
||||
To correct this, they need to download a new image tag that is signed with
|
||||
the new key.
|
||||
|
||||
## Related information
|
||||
|
||||
* [Content trust in Docker](/security/trust/content_trust)
|
||||
* [Automation with content trust](/security/trust/trust_automation)
|
||||
* [Play in a content trust sandbox](/security/trust/trust_sandbox)
|
||||
331
docs/security/trust/trust_sandbox.md
Normal file
@@ -0,0 +1,331 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Play in a content trust sandbox"
|
||||
description = "Play in a trust sandbox"
|
||||
keywords = ["trust, security, root, keys, repository, sandbox"]
|
||||
[menu.main]
|
||||
parent= "smn_content_trust"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# Play in a content trust sandbox
|
||||
|
||||
This page explains how to set up and use a sandbox for experimenting with trust.
|
||||
The sandbox allows you to configure and try trust operations locally without
|
||||
impacting your production images.
|
||||
|
||||
Before working through this sandbox, you should have read through the [trust
|
||||
overview](content_trust.md).
|
||||
|
||||
### Prerequisites
|
||||
|
||||
These instructions assume you are running on Linux or Mac OS X. You can run
|
||||
this sandbox on a local machine or on a virtual machine. You will need to
|
||||
have `sudo` privileges on your local machine or in the VM.
|
||||
|
||||
This sandbox requires you to install two Docker tools: Docker Engine and Docker
|
||||
Compose. To install the Docker Engine, choose from the [list of supported
|
||||
platforms]({{< relref "installation.md" >}}). To install Docker Compose, see the
|
||||
[detailed instructions here]({{< relref "compose/install" >}}).
|
||||
|
||||
Finally, you'll need to have `git` installed on your local system or VM.
|
||||
|
||||
## What is in the sandbox?
|
||||
|
||||
If you are just using trust out of the box, you only need your Docker Engine
|
||||
client and access to Docker's own public hub. The sandbox mimics a
|
||||
production trust environment, and requires these additional components:
|
||||
|
||||
| Container | Description |
|
||||
|-----------------|---------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| notarysandbox | A container with the latest version of Docker Engine and some preconfigured certificates. This is your sandbox where you can use the `docker` client to test trust operations. |
|
||||
| Registry server | A local registry service. |
|
||||
| Notary server | The service that does all the heavy lifting of managing trust. |
|
||||
| Notary signer | A service that ensures that your keys are secure. |
|
||||
| MySQL | The database where all of the trust information is stored. |
|
||||
|
||||
The sandbox uses the Docker daemon on your local system. Within the `notarysandbox`
|
||||
you interact with a local registry rather than the public Docker Hub. This means
|
||||
your everyday image repositories are not used. They are protected while you play.
|
||||
|
||||
When you play in the sandbox, you'll also create root and tagging keys. The
|
||||
sandbox is configured to store all the keys and files inside the `notarysandbox`
|
||||
container. Since the keys you create in the sandbox are for play only,
|
||||
destroying the container destroys them as well.
|
||||
|
||||
|
||||
## Build the sandbox
|
||||
|
||||
In this section, you build the Docker components for your trust sandbox. If you
work exclusively with the Docker Hub, you would not need these components.
They are built into the Docker Hub for you. For the sandbox, however, you must
build your own entire mock production environment and registry.
|
||||
|
||||
### Configure /etc/hosts
|
||||
|
||||
The sandbox's `notaryserver` and `sandboxregistry` run on your local server. The
|
||||
client inside the `notarysandbox` container connects to them over your network.
|
||||
So, you'll need an entry for both servers in your local `/etc/hosts` file.
|
||||
|
||||
1. Add an entry for the `notaryserver` to `/etc/hosts`.
|
||||
|
||||
$ sudo sh -c 'echo "127.0.0.1 notaryserver" >> /etc/hosts'
|
||||
|
||||
2. Add an entry for the `sandboxregistry` to `/etc/hosts`.
|
||||
|
||||
$ sudo sh -c 'echo "127.0.0.1 sandboxregistry" >> /etc/hosts'
|
||||
|
||||
|
||||
### Build the notarysandbox image
|
||||
|
||||
1. Create a `notarysandbox` directory on your system.
|
||||
|
||||
$ mkdir notarysandbox
|
||||
|
||||
2. Change into your `notarysandbox` directory.
|
||||
|
||||
$ cd notarysandbox
|
||||
|
||||
3. Create a `notarytest` directory then change into that.
|
||||
|
||||
$ mkdir notarytest
|
||||
$ cd notarytest
|
||||
|
||||
4. Create a file called `Dockerfile` with your favorite editor.
|
||||
|
||||
5. Add the following to the new file.
|
||||
|
||||
FROM debian:jessie
|
||||
|
||||
ADD https://master.dockerproject.org/linux/amd64/docker /usr/bin/docker
|
||||
RUN chmod +x /usr/bin/docker \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y \
|
||||
tree \
|
||||
vim \
|
||||
git \
|
||||
ca-certificates \
|
||||
--no-install-recommends
|
||||
|
||||
WORKDIR /root
|
||||
RUN git clone -b trust-sandbox https://github.com/docker/notary.git
|
||||
RUN cp /root/notary/fixtures/root-ca.crt /usr/local/share/ca-certificates/root-ca.crt
|
||||
RUN update-ca-certificates
|
||||
|
||||
ENTRYPOINT ["bash"]
|
||||
|
||||
6. Save and close the file.
|
||||
|
||||
7. Build the testing container.
|
||||
|
||||
$ docker build -t notarysandbox .
|
||||
Sending build context to Docker daemon 2.048 kB
|
||||
Step 0 : FROM debian:jessie
|
||||
...
|
||||
Successfully built 5683f17e9d72
|
||||
|
||||
|
||||
### Build and start up the trust servers
|
||||
|
||||
In this step, you get the source code for your notary and registry services.
|
||||
Then, you'll use Docker Compose to build and start them on your local system.
|
||||
|
||||
1. Change back to the root of your `notarysandbox` directory.
|
||||
|
||||
$ cd notarysandbox
|
||||
|
||||
2. Clone the `notary` project.
|
||||
|
||||
$ git clone -b trust-sandbox https://github.com/docker/notary.git
|
||||
|
||||
3. Clone the `distribution` project.
|
||||
|
||||
$ git clone https://github.com/docker/distribution.git
|
||||
|
||||
4. Change to the `notary` project directory.
|
||||
|
||||
$ cd notary
|
||||
|
||||
The directory contains a `docker-compose` file that you'll use to run a
|
||||
notary server together with a notary signer and the corresponding MySQL
|
||||
databases. The databases store the trust information for an image.
|
||||
|
||||
5. Build the server images.
|
||||
|
||||
$ docker-compose build
|
||||
|
||||
The first time you run this, the build takes some time.
|
||||
|
||||
6. Run the server containers on your local system.
|
||||
|
||||
$ docker-compose up -d
|
||||
|
||||
Once the trust services are up, you'll set up a local version of the Docker
|
||||
Registry v2.
|
||||
|
||||
7. Change to the `notarysandbox/distribution` directory.
|
||||
|
||||
8. Build the `sandboxregistry` server.
|
||||
|
||||
$ docker build -t sandboxregistry .
|
||||
|
||||
9. Start the `sandboxregistry` server.
|
||||
|
||||
$ docker run -p 5000:5000 --name sandboxregistry sandboxregistry &
|
||||
|
||||
## Playing in the sandbox
|
||||
|
||||
Now that everything is set up, you can go into your `notarysandbox` container and
|
||||
start testing Docker content trust.
|
||||
|
||||
|
||||
### Start the notarysandbox container
|
||||
|
||||
In this procedure, you start the `notarysandbox` and link it to the running
|
||||
`notary_notaryserver_1` and `sandboxregistry` containers. The links allow
|
||||
communication among the containers.
|
||||
|
||||
```
|
||||
$ docker run -it -v /var/run/docker.sock:/var/run/docker.sock --link notary_notaryserver_1:notaryserver --link sandboxregistry:sandboxregistry notarysandbox
|
||||
root@0710762bb59a:/#
|
||||
```
|
||||
|
||||
Mounting the `docker.sock` gives the `notarysandbox` container access to the `docker`
|
||||
daemon on your host, while storing all the keys and files inside the sandbox
|
||||
container. When you destroy the container, you destroy the "play" keys.
|
||||
|
||||
### Test some trust operations
|
||||
|
||||
Now, you'll pull some images.
|
||||
|
||||
1. Download a `docker` image to test with.
|
||||
|
||||
# docker pull docker/trusttest
|
||||
|
||||
Using default tag: latest
|
||||
latest: Pulling from docker/trusttest
|
||||
|
||||
b3dbab3810fc: Pull complete
|
||||
a9539b34a6ab: Pull complete
|
||||
Digest: sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a
|
||||
Status: Downloaded newer image for docker/trusttest:latest
|
||||
|
||||
2. Tag it to be pushed to our sandbox registry:
|
||||
|
||||
# docker tag docker/trusttest sandboxregistry:5000/test/trusttest:latest
|
||||
|
||||
3. Enable content trust.
|
||||
|
||||
# export DOCKER_CONTENT_TRUST=1
|
||||
|
||||
4. Identify the trust server.
|
||||
|
||||
# export DOCKER_CONTENT_TRUST_SERVER=https://notaryserver:4443
|
||||
|
||||
This step is only necessary because the sandbox uses its own trust server.
If you are using the public Docker Hub, you don't need to set this variable.
|
||||
|
||||
5. Pull the test image.
|
||||
|
||||
# docker pull sandboxregistry:5000/test/trusttest
|
||||
Using default tag: latest
|
||||
no trust data available
|
||||
|
||||
You see an error, because this content doesn't exist on the `sandboxregistry` yet.
|
||||
|
||||
6. Push the trusted image.
|
||||
|
||||
# docker push sandboxregistry:5000/test/trusttest:latest
|
||||
The push refers to a repository [sandboxregistry:5000/test/trusttest] (len: 1)
|
||||
a9539b34a6ab: Image successfully pushed
|
||||
b3dbab3810fc: Image successfully pushed
|
||||
latest: digest: sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c size: 3348
|
||||
Signing and pushing trust metadata
|
||||
You are about to create a new root signing key passphrase. This passphrase
|
||||
will be used to protect the most sensitive key in your signing system. Please
|
||||
choose a long, complex passphrase and be careful to keep the password and the
|
||||
key file itself secure and backed up. It is highly recommended that you use a
|
||||
password manager to generate the passphrase and keep it safe. There will be no
|
||||
way to recover this key. You can find the key in your config directory.
|
||||
Enter passphrase for new offline key with id 8c69e04:
|
||||
Repeat passphrase for new offline key with id 8c69e04:
|
||||
Enter passphrase for new tagging key with id sandboxregistry:5000/test/trusttest (93c362a):
|
||||
Repeat passphrase for new tagging key with id sandboxregistry:5000/test/trusttest (93c362a):
|
||||
Finished initializing "sandboxregistry:5000/test/trusttest"
|
||||
latest: digest: sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a size: 3355
|
||||
Signing and pushing trust metadata
|
||||
|
||||
7. Try pulling the image you just pushed:
|
||||
|
||||
# docker pull sandboxregistry:5000/test/trusttest
|
||||
Using default tag: latest
|
||||
Pull (1 of 1): sandboxregistry:5000/test/trusttest:latest@sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c
|
||||
sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c: Pulling from test/trusttest
|
||||
b3dbab3810fc: Already exists
|
||||
a9539b34a6ab: Already exists
|
||||
Digest: sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c
|
||||
Status: Downloaded newer image for sandboxregistry:5000/test/trusttest@sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c
|
||||
Tagging sandboxregistry:5000/test/trusttest@sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c as sandboxregistry:5000/test/trusttest:latest
|
||||
|
||||
|
||||
### Test with malicious images
|
||||
|
||||
What happens when data is corrupted and you try to pull it when trust is
|
||||
enabled? In this section, you go into the `sandboxregistry` and tamper with some
|
||||
data. Then, you try to pull it.
|
||||
|
||||
1. Leave the sandbox container running.
|
||||
|
||||
2. Open a new bash terminal from your host into the `sandboxregistry`.
|
||||
|
||||
$ docker exec -it sandboxregistry bash
|
||||
296db6068327#
|
||||
|
||||
3. Change into the registry storage.
|
||||
|
||||
You'll need to provide the `sha` you received when you pushed the image.
|
||||
|
||||
# cd /var/lib/registry/docker/registry/v2/blobs/sha256/aa/aac0c133338db2b18ff054943cee3267fe50c75cdee969aed88b1992539ed042
|
||||
|
||||
4. Add malicious data to one of the trusttest layers:
|
||||
|
||||
# echo "Malicious data" > data
|
||||
|
||||
5. Go back to your sandbox terminal.
|
||||
|
||||
6. List the trusttest image.
|
||||
|
||||
# docker images | grep trusttest
|
||||
docker/trusttest latest a9539b34a6ab 7 weeks ago 5.025 MB
|
||||
sandboxregistry:5000/test/trusttest latest a9539b34a6ab 7 weeks ago 5.025 MB
|
||||
sandboxregistry:5000/test/trusttest <none> a9539b34a6ab 7 weeks ago 5.025 MB
|
||||
|
||||
7. Remove the `trusttest:latest` image.
|
||||
|
||||
# docker rmi -f a9539b34a6ab
|
||||
Untagged: docker/trusttest:latest
|
||||
Untagged: sandboxregistry:5000/test/trusttest:latest
|
||||
Untagged: sandboxregistry:5000/test/trusttest@sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c
|
||||
Deleted: a9539b34a6aba01d3942605dfe09ab821cd66abf3cf07755b0681f25ad81f675
|
||||
Deleted: b3dbab3810fc299c21f0894d39a7952b363f14520c2f3d13443c669b63b6aa20
|
||||
|
||||
8. Pull the image again.
|
||||
|
||||
# docker pull sandboxregistry:5000/test/trusttest
|
||||
Using default tag: latest
|
||||
...
|
||||
b3dbab3810fc: Verifying Checksum
|
||||
a9539b34a6ab: Pulling fs layer
|
||||
filesystem layer verification failed for digest sha256:aac0c133338db2b18ff054943cee3267fe50c75cdee969aed88b1992539ed042
|
||||
|
||||
You'll see that the pull did not complete because the trust system was
|
||||
unable to verify the image.
|
||||
|
||||
## More play in the sandbox
|
||||
|
||||
Now that you have a full Docker content trust sandbox on your local system,
|
||||
feel free to play with it and see how it behaves. If you find any security
|
||||
issues with Docker, feel free to send us an email at <security@docker.com>.
|
||||
|
||||
|
||||
|
||||
@@ -256,7 +256,7 @@ Let's create a directory and a `Dockerfile` first.
|
||||
$ cd sinatra
|
||||
$ touch Dockerfile
|
||||
|
||||
If you are using Boot2Docker on Windows, you may access your host
|
||||
If you are using Docker Machine on Windows, you may access your host
|
||||
directory by `cd` to `/c/Users/your_user_name`.
|
||||
|
||||
Each instruction creates a new layer of the image. Let's look at a simple
|
||||
|
||||
@@ -15,9 +15,10 @@ parent = "smn_applied"
|
||||
Docker allows you to run applications inside containers. Running an
|
||||
application inside a container takes a single command: `docker run`.
|
||||
|
||||
> **Note:** if you are using a remote Docker daemon, such as Boot2Docker,
|
||||
> then _do not_ type the `sudo` before the `docker` commands shown in the
|
||||
> documentation's examples.
|
||||
>**Note**: Depending on your Docker system configuration, you may be required to
|
||||
>preface each `docker` command on this page with `sudo`. To avoid this behavior,
|
||||
>your system administrator can create a Unix group called `docker` and add users
|
||||
>to it.
|
||||
|
||||
## Hello world
|
||||
|
||||
|
||||
@@ -74,29 +74,34 @@ The output will provide details on the container configurations including the
|
||||
volumes. The output should look something similar to the following:
|
||||
|
||||
...
|
||||
"Volumes": {
|
||||
"/webapp": "/var/lib/docker/volumes/fac362...80535"
|
||||
},
|
||||
"VolumesRW": {
|
||||
"/webapp": true
|
||||
}
|
||||
"Mounts": [
|
||||
{
|
||||
"Name": "fac362...80535",
|
||||
"Source": "/var/lib/docker/volumes/fac362...80535/_data",
|
||||
"Destination": "/webapp",
|
||||
"Driver": "local",
|
||||
"Mode": "",
|
||||
"RW": true
|
||||
}
|
||||
]
|
||||
...
|
||||
|
||||
You will notice in the above 'Volumes' is specifying the location on the host and
|
||||
'VolumesRW' is specifying that the volume is read/write.
|
||||
You will notice in the above 'Source' is specifying the location on the host and
|
||||
'Destination' is specifying the volume location inside the container. `RW` shows
|
||||
if the volume is read/write.
|
||||
|
||||
### Mount a host directory as a data volume
|
||||
|
||||
In addition to creating a volume using the `-v` flag you can also mount a
|
||||
directory from your Docker daemon's host into a container.
|
||||
|
||||
> **Note:**
|
||||
> If you are using Boot2Docker, your Docker daemon only has limited access to
|
||||
> your OS X/Windows filesystem. Boot2Docker tries to auto-share your `/Users`
|
||||
> (OS X) or `C:\Users` (Windows) directory - and so you can mount files or directories
|
||||
> using `docker run -v /Users/<path>:/<container path> ...` (OS X) or
|
||||
> `docker run -v /c/Users/<path>:/<container path ...` (Windows). All other paths
|
||||
> come from the Boot2Docker virtual machine's filesystem.
|
||||
>**Note**: If you are using Docker Machine on Mac or Windows, your Docker daemon
|
||||
>only has limited access to your OS X/Windows filesystem. Docker Machine tries
|
||||
>to auto-share your `/Users` (OS X) or `C:\Users` (Windows) directory - and so
|
||||
>you can mount files or directories using `docker run -v
|
||||
>/Users/<path>:/<container path> ...` (OS X) or `docker run -v
|
||||
>/c/Users/<path>:/<container path ...` (Windows). All other paths come from your
|
||||
>virtual machine's filesystem.
|
||||
|
||||
$ docker run -d -P --name web -v /src/webapp:/opt/webapp training/webapp python app.py
|
||||
|
||||
|
||||
53
docs/userguide/image_management.md
Normal file
@@ -0,0 +1,53 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
alias = [ "/reference/api/hub_registry_spec/"]
|
||||
title = "Image management"
|
||||
description = "Documentation for docker Registry and Registry API"
|
||||
keywords = ["docker, registry, api, hub"]
|
||||
[menu.main]
|
||||
parent="mn_docker_hub"
|
||||
weight=-1
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# Image management
|
||||
|
||||
The Docker Engine provides a client which you can use to create images on the command line or through a build process. You can run these images in a container or publish them for others to use. Storing the images you create, searching for images you might want, or publishing images others might use are all elements of image management.
|
||||
|
||||
This section provides an overview of the major features and products Docker provides for image management.
|
||||
|
||||
|
||||
## Docker Hub
|
||||
|
||||
The [Docker Hub](https://docs.docker.com/docker-hub/) is responsible for centralizing information about user accounts, images, and public namespaces. It has different components:
|
||||
|
||||
- Web UI
|
||||
- Meta-data store (comments, stars, list public repositories)
|
||||
- Authentication service
|
||||
- Tokenization
|
||||
|
||||
There is only one instance of the Docker Hub, run and managed by Docker Inc. This public Hub is useful for most individuals and smaller companies.
|
||||
|
||||
## Docker Registry and the Docker Trusted Registry
|
||||
|
||||
The Docker Registry is a component of Docker's ecosystem. A registry is a
|
||||
storage and content delivery system, holding named Docker images, available in
|
||||
different tagged versions. For example, the image `distribution/registry`, with
|
||||
tags `2.0` and `latest`. Users interact with a registry using the `docker push` and
`docker pull` commands. For example, `docker pull myregistry.com/stevvooe/batman:voice`.
|
||||
|
||||
The Docker Hub has its own registry which, like the Hub itself, is run and managed by Docker. There are other ways to obtain a registry. You can purchase the [Docker Trusted Registry](https://docs.docker.com/docker-trusted-registry) product to run on your company's network. Alternatively, you can use the Docker Registry component to build a private registry. For information about using a registry, see the overview for the [Docker Registry](https://docs.docker.com/registry).
|
||||
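For example, you could stand up a minimal private registry with the open-source Registry image. This is a sketch for local experimentation only; it configures no TLS or authentication:

```bash
# Run the open-source registry locally, then tag and push an image to it.
$ docker run -d -p 5000:5000 --name myregistry registry:2
$ docker tag ubuntu localhost:5000/my-ubuntu
$ docker push localhost:5000/my-ubuntu
```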
|
||||
|
||||
## Content Trust
|
||||
|
||||
When transferring data among networked systems, *trust* is a central concern. In
|
||||
particular, when communicating over an untrusted medium such as the internet, it
|
||||
is critical to ensure the integrity and the publisher of all the data a system
|
||||
operates on. You use Docker to push and pull images (data) to a registry.
|
||||
Content trust gives you the ability to both verify the integrity and the
|
||||
publisher of all the data received from a registry over any channel.
|
||||
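In practice, a client opts into content trust with a single environment variable. This is only a sketch; the linked content trust documentation covers the details:

```bash
# Opt this shell's Docker client into content trust.
$ export DOCKER_CONTENT_TRUST=1

# Pulls and pushes now verify (or create) trust data for the tags involved.
$ docker pull docker/trusttest:latest
```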
|
||||
[Content trust](/security/trust) is currently only available for users of the
|
||||
public Docker Hub. It is currently not available for the Docker Trusted Registry
|
||||
or for private registries.
|
||||
@@ -13,18 +13,18 @@ please feel free to provide any feedback on these features you wish.
|
||||
|
||||
Unlike the regular Docker binary, the experimental channels is built and updated nightly on TO.BE.ANNOUNCED. From one day to the next, new features may appear, while existing experimental features may be refined or entirely removed.
|
||||
|
||||
1. Verify that you have `wget` installed.
|
||||
1. Verify that you have `curl` installed.
|
||||
|
||||
$ which wget
|
||||
$ which curl
|
||||
|
||||
If `wget` isn't installed, install it after updating your manager:
|
||||
If `curl` isn't installed, install it after updating your manager:
|
||||
|
||||
$ sudo apt-get update
|
||||
$ sudo apt-get install wget
|
||||
$ sudo apt-get install curl
|
||||
|
||||
2. Get the latest Docker package.
|
||||
|
||||
$ wget -qO- https://experimental.docker.com/ | sh
|
||||
$ curl -sSL https://experimental.docker.com/ | sh
|
||||
|
||||
The system prompts you for your `sudo` password. Then, it downloads and
|
||||
installs Docker and its dependencies.
|
||||
@@ -34,7 +34,7 @@ Unlike the regular Docker binary, the experimental channels is built and updated
|
||||
>command fails for the Docker repo during installation. To work around this,
|
||||
>add the key directly using the following:
|
||||
>
|
||||
> $ wget -qO- https://experimental.docker.com/gpg | sudo apt-key add -
|
||||
> $ curl -sSL https://experimental.docker.com/gpg | sudo apt-key add -
|
||||
|
||||
3. Verify `docker` is installed correctly.
|
||||
|
||||
@@ -61,8 +61,6 @@ After downloading the appropriate binary, you can follow the instructions
|
||||
|
||||
## Current experimental features
|
||||
|
||||
* [Support for Docker plugins](plugins.md)
|
||||
* [Volume plugins](plugins_volume.md)
|
||||
* [Network plugins](plugins_network.md)
|
||||
* [Native Multi-host networking](networking.md)
|
||||
* [Compose, Swarm and networking integration](compose_swarm_networking.md)
|
||||
|
||||
@@ -61,7 +61,7 @@ func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConf
|
||||
return err
|
||||
}
|
||||
|
||||
endpoints, err := s.registryService.LookupEndpoints(repoInfo.CanonicalName)
|
||||
endpoints, err := s.registryService.LookupPullEndpoints(repoInfo.CanonicalName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
104
graph/pull_v2.go
@@ -1,6 +1,7 @@
|
||||
package graph
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -76,7 +77,7 @@ func (p *v2Puller) pullV2Repository(tag string) (err error) {
|
||||
if err != nil {
|
||||
if c != nil {
|
||||
// Another pull of the same repository is already taking place; just wait for it to finish
|
||||
p.sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", p.repoInfo.CanonicalName)
|
||||
p.config.OutStream.Write(p.sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", p.repoInfo.CanonicalName))
|
||||
<-c
|
||||
return nil
|
||||
}
|
||||
@@ -102,13 +103,13 @@ func (p *v2Puller) pullV2Repository(tag string) (err error) {
|
||||
|
||||
// downloadInfo is used to pass information from download to extractor
|
||||
type downloadInfo struct {
|
||||
img *image.Image
|
||||
tmpFile *os.File
|
||||
digest digest.Digest
|
||||
layer distribution.ReadSeekCloser
|
||||
size int64
|
||||
err chan error
|
||||
verified bool
|
||||
img *image.Image
|
||||
tmpFile *os.File
|
||||
digest digest.Digest
|
||||
layer distribution.ReadSeekCloser
|
||||
size int64
|
||||
err chan error
|
||||
out io.Writer // Download progress is written here.
|
||||
}
|
||||
|
||||
type errVerification struct{}
|
||||
@@ -118,7 +119,7 @@ func (errVerification) Error() string { return "verification failed" }
|
||||
func (p *v2Puller) download(di *downloadInfo) {
|
||||
logrus.Debugf("pulling blob %q to %s", di.digest, di.img.ID)
|
||||
|
||||
out := p.config.OutStream
|
||||
out := di.out
|
||||
|
||||
if c, err := p.poolAdd("pull", "img:"+di.img.ID); err != nil {
|
||||
if c != nil {
|
||||
@@ -139,9 +140,9 @@ func (p *v2Puller) download(di *downloadInfo) {
|
||||
return
|
||||
}
|
||||
|
||||
blobs := p.repo.Blobs(nil)
|
||||
blobs := p.repo.Blobs(context.Background())
|
||||
|
||||
desc, err := blobs.Stat(nil, di.digest)
|
||||
desc, err := blobs.Stat(context.Background(), di.digest)
|
||||
if err != nil {
|
||||
logrus.Debugf("Error statting layer: %v", err)
|
||||
di.err <- err
|
||||
@@ -149,7 +150,7 @@ func (p *v2Puller) download(di *downloadInfo) {
|
||||
}
|
||||
di.size = desc.Size
|
||||
|
||||
layerDownload, err := blobs.Open(nil, di.digest)
|
||||
layerDownload, err := blobs.Open(context.Background(), di.digest)
|
||||
if err != nil {
|
||||
logrus.Debugf("Error fetching layer: %v", err)
|
||||
di.err <- err
|
||||
@@ -176,9 +177,11 @@ func (p *v2Puller) download(di *downloadInfo) {
|
||||
|
||||
out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Verifying Checksum", nil))
|
||||
|
||||
di.verified = verifier.Verified()
|
||||
if !di.verified {
|
||||
logrus.Infof("Image verification failed for layer %s", di.digest)
|
||||
if !verifier.Verified() {
|
||||
err = fmt.Errorf("filesystem layer verification failed for digest %s", di.digest)
|
||||
logrus.Error(err)
|
||||
di.err <- err
|
||||
return
|
||||
}
|
||||
|
||||
out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Download complete", nil))
|
||||
@@ -190,7 +193,7 @@ func (p *v2Puller) download(di *downloadInfo) {
|
||||
di.err <- nil
|
||||
}
|
||||
|
||||
func (p *v2Puller) pullV2Tag(tag, taggedName string) (bool, error) {
|
||||
func (p *v2Puller) pullV2Tag(tag, taggedName string) (verified bool, err error) {
|
||||
logrus.Debugf("Pulling tag from V2 registry: %q", tag)
|
||||
out := p.config.OutStream
|
||||
|
||||
@@ -203,7 +206,7 @@ func (p *v2Puller) pullV2Tag(tag, taggedName string) (bool, error) {
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
verified, err := p.validateManifest(manifest, tag)
|
||||
verified, err = p.validateManifest(manifest, tag)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -211,6 +214,33 @@ func (p *v2Puller) pullV2Tag(tag, taggedName string) (bool, error) {
|
||||
logrus.Printf("Image manifest for %s has been verified", taggedName)
|
||||
}
|
||||
|
||||
// By using a pipeWriter for each of the downloads to write their progress
|
||||
// to, we can avoid an issue where this function returns an error but
|
||||
// leaves behind running download goroutines. By splitting the writer
|
||||
// with a pipe, we can close the pipe if there is any error, consequently
|
||||
// causing each download to cancel due to an error writing to this pipe.
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
go func() {
|
||||
if _, err := io.Copy(out, pipeReader); err != nil {
|
||||
logrus.Errorf("error copying from layer download progress reader: %s", err)
|
||||
if err := pipeReader.CloseWithError(err); err != nil {
|
||||
logrus.Errorf("error closing the progress reader: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
defer func() {
|
||||
if err != nil {
|
||||
// All operations on the pipe are synchronous. This call will wait
|
||||
// until all current readers/writers are done using the pipe then
|
||||
// set the error. All successive reads/writes will return with this
|
||||
// error.
|
||||
pipeWriter.CloseWithError(errors.New("download canceled"))
|
||||
} else {
|
||||
// If no error then just close the pipe.
|
||||
pipeWriter.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
out.Write(p.sf.FormatStatus(tag, "Pulling from %s", p.repo.Name()))
|
||||
|
||||
downloads := make([]downloadInfo, len(manifest.FSLayers))
|
||||
@@ -241,6 +271,7 @@ func (p *v2Puller) pullV2Tag(tag, taggedName string) (bool, error) {
|
||||
out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))
|
||||
|
||||
downloads[i].err = make(chan error)
|
||||
downloads[i].out = pipeWriter
|
||||
go p.download(&downloads[i])
|
||||
}
|
||||
|
||||
@@ -252,7 +283,6 @@ func (p *v2Puller) pullV2Tag(tag, taggedName string) (bool, error) {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
verified = verified && d.verified
|
||||
if d.layer != nil {
|
||||
// if tmpFile is empty assume download and extracted elsewhere
|
||||
defer os.Remove(d.tmpFile.Name())
|
||||
@@ -368,6 +398,28 @@ func (p *v2Puller) verifyTrustedKeys(namespace string, keys []libtrust.PublicKey
|
||||
}
|
||||
|
||||
func (p *v2Puller) validateManifest(m *manifest.SignedManifest, tag string) (verified bool, err error) {
|
||||
// If pull by digest, then verify the manifest digest. NOTE: It is
|
||||
// important to do this first, before any other content validation. If the
|
||||
// digest cannot be verified, don't even bother with those other things.
|
||||
if manifestDigest, err := digest.ParseDigest(tag); err == nil {
|
||||
verifier, err := digest.NewDigestVerifier(manifestDigest)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
payload, err := m.Payload()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if _, err := verifier.Write(payload); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !verifier.Verified() {
|
||||
err := fmt.Errorf("image verification failed for digest %s", manifestDigest)
|
||||
logrus.Error(err)
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(tiborvass): what's the usecase for having manifest == nil and err == nil ? Shouldn't be the error be "DoesNotExist" ?
|
||||
if m == nil {
|
||||
return false, fmt.Errorf("image manifest does not exist for tag %q", tag)
|
||||
@@ -389,21 +441,5 @@ func (p *v2Puller) validateManifest(m *manifest.SignedManifest, tag string) (ver
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error verifying manifest keys: %v", err)
|
||||
}
|
||||
localDigest, err := digest.ParseDigest(tag)
|
||||
// if pull by digest, then verify
|
||||
if err == nil {
|
||||
verifier, err := digest.NewDigestVerifier(localDigest)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
payload, err := m.Payload()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if _, err := verifier.Write(payload); err != nil {
|
||||
return false, err
|
||||
}
|
||||
verified = verified && verifier.Verified()
|
||||
}
|
||||
return verified, nil
|
||||
}
|
||||
|
||||
@@ -29,13 +29,12 @@ func (s *TagStore) NewPusher(endpoint registry.APIEndpoint, localRepo Repository
|
||||
switch endpoint.Version {
|
||||
case registry.APIVersion2:
|
||||
return &v2Pusher{
|
||||
TagStore: s,
|
||||
endpoint: endpoint,
|
||||
localRepo: localRepo,
|
||||
repoInfo: repoInfo,
|
||||
config: imagePushConfig,
|
||||
sf: sf,
|
||||
layersSeen: make(map[string]bool),
|
||||
TagStore: s,
|
||||
endpoint: endpoint,
|
||||
localRepo: localRepo,
|
||||
repoInfo: repoInfo,
|
||||
config: imagePushConfig,
|
||||
sf: sf,
|
||||
}, nil
|
||||
case registry.APIVersion1:
|
||||
return &v1Pusher{
|
||||
@@ -60,7 +59,7 @@ func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) erro
|
||||
return err
|
||||
}
|
||||
|
||||
endpoints, err := s.registryService.LookupEndpoints(repoInfo.CanonicalName)
|
||||
endpoints, err := s.registryService.LookupPushEndpoints(repoInfo.CanonicalName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -27,11 +27,6 @@ type v2Pusher struct {
|
||||
config *ImagePushConfig
|
||||
sf *streamformatter.StreamFormatter
|
||||
repo distribution.Repository
|
||||
|
||||
// layersSeen is the set of layers known to exist on the remote side.
|
||||
// This avoids redundant queries when pushing multiple tags that
|
||||
// involve the same layers.
|
||||
layersSeen map[string]bool
|
||||
}
|
||||
|
||||
func (p *v2Pusher) Push() (fallback bool, err error) {
|
||||
@@ -92,6 +87,8 @@ func (p *v2Pusher) pushV2Tag(tag string) error {
|
||||
return fmt.Errorf("tag does not exist: %s", tag)
|
||||
}
|
||||
|
||||
layersSeen := make(map[string]bool)
|
||||
|
||||
layer, err := p.graph.Get(layerId)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -120,7 +117,7 @@ func (p *v2Pusher) pushV2Tag(tag string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if p.layersSeen[layer.ID] {
|
||||
if layersSeen[layer.ID] {
|
||||
break
|
||||
}
|
||||
|
||||
@@ -141,7 +138,7 @@ func (p *v2Pusher) pushV2Tag(tag string) error {
|
||||
dgst, err := p.graph.GetDigest(layer.ID)
|
||||
switch err {
|
||||
case nil:
|
||||
_, err := p.repo.Blobs(nil).Stat(nil, dgst)
|
||||
_, err := p.repo.Blobs(context.Background()).Stat(context.Background(), dgst)
|
||||
switch err {
|
||||
case nil:
|
||||
exists = true
|
||||
@@ -161,7 +158,7 @@ func (p *v2Pusher) pushV2Tag(tag string) error {
|
||||
// if digest was empty or not saved, or if blob does not exist on the remote repository,
|
||||
// then fetch it.
|
||||
if !exists {
|
||||
if pushDigest, err := p.pushV2Image(p.repo.Blobs(nil), layer); err != nil {
|
||||
if pushDigest, err := p.pushV2Image(p.repo.Blobs(context.Background()), layer); err != nil {
|
||||
return err
|
||||
} else if pushDigest != dgst {
|
||||
// Cache new checksum
|
||||
@@ -175,7 +172,7 @@ func (p *v2Pusher) pushV2Tag(tag string) error {
|
||||
m.FSLayers = append(m.FSLayers, manifest.FSLayer{BlobSum: dgst})
|
||||
m.History = append(m.History, manifest.History{V1Compatibility: string(jsonData)})
|
||||
|
||||
p.layersSeen[layer.ID] = true
|
||||
layersSeen[layer.ID] = true
|
||||
}
|
||||
|
||||
logrus.Infof("Signed manifest for %s:%s using daemon's key: %s", p.repo.Name(), tag, p.trustKey.KeyID())
|
||||
@@ -229,7 +226,7 @@ func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (d
|
||||
|
||||
// Send the layer
|
||||
logrus.Debugf("rendered layer for %s of [%d] size", img.ID, size)
|
||||
layerUpload, err := bs.Create(nil)
|
||||
layerUpload, err := bs.Create(context.Background())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -253,7 +250,7 @@ func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (d
|
||||
}
|
||||
|
||||
desc := distribution.Descriptor{Digest: dgst}
|
||||
if _, err := layerUpload.Commit(nil, desc); err != nil {
|
||||
if _, err := layerUpload.Commit(context.Background(), desc); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/api/types"
|
||||
@@ -34,7 +35,7 @@ func (s *TagStore) Lookup(name string) (*types.ImageInspect, error) {
|
||||
Id: image.ID,
|
||||
Parent: image.Parent,
|
||||
Comment: image.Comment,
|
||||
Created: image.Created,
|
||||
Created: image.Created.Format(time.RFC3339Nano),
|
||||
Container: image.Container,
|
||||
ContainerConfig: &image.ContainerConfig,
|
||||
DockerVersion: image.DockerVersion,
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.