Mirror of https://github.com/moby/moby.git

Compare commits: v1.8.0-rc3...v1.8.2-rc1 (38 commits)
Commits included in this comparison (SHA1):
28220acecc, 10f250c72b, 3e8da36017, 1cce9a26a3, d7f8b4d43e, 6f7bbc3171,
947087fb24, ffe7e48ed6, eeecd1cf59, 3f411db15b, 789197f33d, 0c71d09921,
cc8320cb58, c22b292719, 14d2083f14, ea56c5e1ce, 341ff018a2, e07819293a,
6ec8d40ae7, 16d64608f3, 00a27b6872, fc12b9ddce, b66e5ef208, d12ea79c9d,
a9aaa66780, e19060dcea, b0e0dbb33b, 0d03096b65, 55e9551aaa, c65afe6ba8,
5745aaed22, b6f0f93c94, 33b16fef43, 9705c349c5, 3de34af5d1, b3f3abfc94,
783baec49c, c1d9e7c6fb

CHANGELOG.md (124 changes)
@@ -1,5 +1,129 @@
# Changelog

## 1.8.2 (2015-09-03)

### Distribution:

- Fixes rare edge case of handling GNU LongLink and LongName entries.
- Avoid buffering to tempfile when pushing to registry V2.
- Fix ^C on docker pull.
- Fix docker pull issues on client disconnection.
- Fix issue that caused the daemon to panic when loggers weren't configured properly.
- Fix goroutine leak pulling images from registry V2.

### Runtime:

- Fix a bug mounting cgroups for docker daemons running inside docker containers.
- Initialize log configuration properly.

### Client:

- Handle `-q` flag in `docker ps` properly when there is a default format.

### Networking:

- Fix several corner cases with netlink.

### Contrib:

- Fix several issues with bash completion.

## 1.8.1 (2015-08-12)

### Distribution

- Fix a bug where pushing multiple tags would result in invalid images

## 1.8.0 (2015-08-11)

### Distribution

+ Trusted pull, push and build, disabled by default
* Make tar layers deterministic between registries
* Don't allow deleting the image of running containers
* Check if a tag name to load is a valid digest
* Allow one character repository names
* Add a more accurate error description for invalid tag name
* Make build cache ignore mtime

### Cli

+ Add support for DOCKER_CONFIG/--config to specify config file dir
+ Add --type flag for docker inspect command
+ Add formatting options to `docker ps` with `--format`
+ Replace `docker -d` with new subcommand `docker daemon`
* Zsh completion updates and improvements
* Add some missing events to bash completion
* Support daemon urls with base paths in `docker -H`
* Validate status= filter to docker ps
* Display when a container is in --net=host in docker ps
* Extend docker inspect to export image metadata related to graph driver
* Restore --default-gateway{,-v6} daemon options
* Add missing unpublished ports in docker ps
* Allow duration strings in `docker events` as --since/--until
* Expose more mounts information in `docker inspect`

### Runtime

+ Add new Fluentd logging driver
+ Allow `docker import` to load from local files
+ Add logging driver for GELF via UDP
+ Allow to copy files from host to containers with `docker cp`
+ Promote volume drivers from experimental to master
+ Add rollover log driver, and --log-driver-opts flag
+ Add memory swappiness tuning options
* Remove cgroup read-only flag when privileged
* Make /proc, /sys, & /dev readonly for readonly containers
* Add cgroup bind mount by default
* Overlay: Export metadata for container and image in `docker inspect`
* Devicemapper: external device activation
* Devicemapper: Compare uuid of base device on startup
* Remove RC4 from the list of registry cipher suites
* Add syslog-facility option
* LXC execdriver compatibility with recent LXC versions
* Mark LXC execdriver as deprecated (to be removed with the migration to runc)

### Plugins

* Separate plugin sockets and specs locations
* Allow TLS connections to plugins

### Bug fixes

- Add missing 'Names' field to /containers/json API output
- Make `docker rmi --dangling` safe when pulling
- Devicemapper: Change default basesize to 100G
- Go Scheduler issue with sync.Mutex and gcc
- Fix issue where Search API endpoint would panic due to empty AuthConfig
- Set image canonical names correctly
- Check dockerinit only if lxc driver is used
- Fix ulimit usage of nproc
- Always attach STDIN if -i,--interactive is specified
- Show error messages when saving container state fails
- Fixed incorrect assumption on --bridge=none treated as disable network
- Check for invalid port specifications in host configuration
- Fix endpoint leave failure for --net=host mode
- Fix goroutine leak in the stats API if the container is not running
- Check for apparmor file before reading it
- Fix DOCKER_TLS_VERIFY being ignored
- Set umask to the default on startup
- Correct the message of pause and unpause a non-running container
- Adjust disallowed CpuShares in container creation
- ZFS: correctly apply selinux context
- Display empty string instead of <nil> when IP opt is nil
- `docker kill` returns error when container is not running
- Fix COPY/ADD quoted/json form
- Fix goroutine leak on logs -f with no output
- Remove panic in nat package on invalid hostport
- Fix container linking in Fedora 22
- Fix error caused using default gateways outside of the allocated range
- Format times in inspect command with a template as RFC3339Nano
- Make registry client to accept 2xx and 3xx http status responses as successful
- Fix race issue that caused the daemon to crash when certain layer downloads failed in a specific order.
- Fix error when the docker ps format was not valid.
- Remove redundant ip forward check.
- Fix issue trying to push images to repository mirrors.
- Fix error cleaning up network entrypoints when there is an initialization issue.
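As a quick illustration (not part of the changelog itself), here is roughly how a few of
the 1.8.0 CLI additions listed above are invoked; the format string, image name, and
config path are made-up example values:

    # `docker daemon` replaces the old `docker -d` form for starting the daemon
    docker daemon --storage-driver=overlay

    # `docker ps` accepts a Go-template format string via --format
    docker ps --format '{{.ID}}: {{.Names}}'

    # `docker inspect` can be limited to one object type with --type
    docker inspect --type=image ubuntu:14.04

    # DOCKER_CONFIG (or --config) points the client at an alternate config directory
    DOCKER_CONFIG=$HOME/.docker-alt docker ps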
## 1.7.1 (2015-07-14)

#### Runtime
@@ -127,7 +127,7 @@ RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint
RUN gem install --no-rdoc --no-ri fpm --version 1.3.2

# Install registry
ENV REGISTRY_COMMIT 2317f721a3d8428215a2b65da4ae85212ed473b4
ENV REGISTRY_COMMIT ec87e9b6971d831f0eff752ddb54fb64693e51cd
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
@@ -95,7 +95,7 @@ func (cli *DockerCli) CmdPs(args ...string) error {
f := *format
if len(f) == 0 {
if len(cli.PsFormat()) > 0 {
if len(cli.PsFormat()) > 0 && !*quiet {
f = cli.PsFormat()
} else {
f = "table"
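For context (an illustrative note, not part of the patch): the added `!*quiet` check makes
`--quiet` win over a configured default format. Assuming a `~/.docker/config.json` that sets
a default `psFormat`, the behaviour is roughly:

    # config.json contains, e.g.:  { "psFormat": "{{.ID}}: {{.Names}}" }
    docker ps        # uses the configured psFormat
    docker ps -q     # with this fix, prints bare container IDs and ignores psFormat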
@@ -2,7 +2,7 @@
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"!
#

FROM ubuntu-debootstrap:precise
FROM ubuntu:precise

RUN apt-get update && apt-get install -y bash-completion build-essential curl ca-certificates debhelper git libapparmor-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
@@ -2,7 +2,7 @@
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"!
#

FROM ubuntu-debootstrap:wily
FROM ubuntu:trusty

RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
@@ -2,7 +2,7 @@
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"!
#

FROM ubuntu-debootstrap:vivid
FROM ubuntu:vivid

RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
@@ -2,7 +2,7 @@
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"!
#

FROM ubuntu-debootstrap:trusty
FROM ubuntu:wily

RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
@@ -139,7 +139,7 @@ __docker_value_of_option() {
local counter=$((command_pos + 1))
while [ $counter -lt $cword ]; do
case ${words[$counter]} in
$option_glob )
@($option_glob) )
echo ${words[$counter + 1]}
break
;;
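An aside on why the `@( )` wrapper matters (my own sketch, not from the patch): when
`$option_glob` expands to a pipe-separated list such as `--storage-driver|-s`, the bare
expansion is a single pattern in which `|` is literal, while the extglob form matches
either alternative (extglob is typically enabled by the completion machinery):

    # standalone bash sketch
    shopt -s extglob
    option_glob='--storage-driver|-s'
    case "-s" in
        $option_glob )    echo "never reached: '|' is literal in the bare pattern" ;;
        @($option_glob) ) echo "matches either --storage-driver or -s" ;;
    esac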
@@ -229,11 +229,12 @@ __docker_log_driver_options() {
# see docs/reference/logging/index.md
local fluentd_options="fluentd-address fluentd-tag"
local gelf_options="gelf-address gelf-tag"
local json_file_options="max-file max-size"
local syslog_options="syslog-address syslog-facility syslog-tag"

case $(__docker_value_of_option --log-driver) in
'')
COMPREPLY=( $( compgen -W "$fluentd_options $gelf_options $syslog_options" -S = -- "$cur" ) )
COMPREPLY=( $( compgen -W "$fluentd_options $gelf_options $json_file_options $syslog_options" -S = -- "$cur" ) )
;;
fluentd)
COMPREPLY=( $( compgen -W "$fluentd_options" -S = -- "$cur" ) )
@@ -241,6 +242,9 @@ __docker_log_driver_options() {
gelf)
COMPREPLY=( $( compgen -W "$gelf_options" -S = -- "$cur" ) )
;;
json-file)
COMPREPLY=( $( compgen -W "$json_file_options" -S = -- "$cur" ) )
;;
syslog)
COMPREPLY=( $( compgen -W "$syslog_options" -S = -- "$cur" ) )
;;
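For reference (an example of mine, not part of the completion script), the `json-file`
options the completion now offers correspond to a `docker run` invocation like the
following; the size and count values are arbitrary:

    docker run -d \
      --log-driver=json-file \
      --log-opt max-size=10m \
      --log-opt max-file=3 \
      nginx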
@@ -320,7 +324,7 @@ __docker_signals() {
_docker_docker() {
local boolean_options="
$global_boolean_options
--help -h
--help
--version -v
"
@@ -338,8 +342,6 @@ _docker_docker() {
;;
esac

__docker_complete_log_driver_options && return

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "$boolean_options $global_options_with_args" -- "$cur" ) )
@@ -460,7 +462,7 @@ _docker_create() {
_docker_daemon() {
local boolean_options="
$global_boolean_options
--help -h
--help
--icc=false
--ip-forward=false
--ip-masq=false
@@ -512,7 +514,39 @@ _docker_daemon() {
return
;;
--storage-driver|-s)
COMPREPLY=( $( compgen -W "aufs devicemapper btrfs overlay" -- "$(echo $cur | tr '[:upper:]' '[:lower:]')" ) )
COMPREPLY=( $( compgen -W "aufs btrfs devicemapper overlay vfs zfs" -- "$(echo $cur | tr '[:upper:]' '[:lower:]')" ) )
return
;;
--storage-opt)
local devicemapper_options="
dm.basesize
dm.blkdiscard
dm.blocksize
dm.fs
dm.loopdatasize
dm.loopmetadatasize
dm.mkfsarg
dm.mountopt
dm.override_udev_sync_check
dm.thinpooldev
"
local zfs_options="zfs.fsname"

case $(__docker_value_of_option '--storage-driver|-s') in
'')
COMPREPLY=( $( compgen -W "$devicemapper_options $zfs_options" -S = -- "$cur" ) )
;;
devicemapper)
COMPREPLY=( $( compgen -W "$devicemapper_options" -S = -- "$cur" ) )
;;
zfs)
COMPREPLY=( $( compgen -W "$zfs_options" -S = -- "$cur" ) )
;;
*)
return
;;
esac
compopt -o nospace
return
;;
--log-level|-l)
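As an illustration of the options being completed above (my example, not from the patch;
the device path is hypothetical), a daemon started with the devicemapper driver and a
couple of `--storage-opt` keys looks like:

    docker daemon \
      --storage-driver=devicemapper \
      --storage-opt dm.fs=xfs \
      --storage-opt dm.thinpooldev=/dev/mapper/docker-thinpool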
@@ -528,6 +562,27 @@ _docker_daemon() {
;;
esac

__docker_complete_log_driver_options && return

case "${words[$cword-2]}$prev=" in
*dm.blkdiscard=*)
COMPREPLY=( $( compgen -W "false true" -- "${cur#=}" ) )
return
;;
*dm.fs=*)
COMPREPLY=( $( compgen -W "ext4 xfs" -- "${cur#=}" ) )
return
;;
*dm.override_udev_sync_check=*)
COMPREPLY=( $( compgen -W "false true" -- "${cur#=}" ) )
return
;;
*dm.thinpooldev=*)
_filedir
return
;;
esac

case "$cur" in
-*)
COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
@@ -869,7 +924,7 @@ _docker_ps() {
compopt -o nospace
return
;;
-n)
--format|-n)
return
;;
esac
@@ -893,7 +948,7 @@ _docker_ps() {
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--all -a --before --filter -f --help --latest -l -n --no-trunc --quiet -q --size -s --since" -- "$cur" ) )
COMPREPLY=( $( compgen -W "--all -a --before --filter -f --format --help --latest -l -n --no-trunc --quiet -q --size -s --since" -- "$cur" ) )
;;
esac
}
@@ -998,15 +1053,16 @@ _docker_rmi() {
_docker_run() {
local options_with_args="
--add-host
--blkio-weight
--attach -a
--blkio-weight
--cap-add
--cap-drop
--cgroup-parent
--cidfile
--cpuset
--cpu-period
--cpu-quota
--cpuset-cpus
--cpuset-mems
--cpu-shares -c
--device
--dns
@@ -1018,8 +1074,8 @@ _docker_run() {
--group-add
--hostname -h
--ipc
--label -l
--label-file
--label -l
--link
--log-driver
--log-opt
@@ -1027,14 +1083,15 @@ _docker_run() {
--mac-address
--memory -m
--memory-swap
--memory-swappiness
--name
--net
--pid
--publish -p
--restart
--security-opt
--user -u
--ulimit
--user -u
--uts
--volumes-from
--volume -v
@@ -1042,8 +1099,10 @@ _docker_run() {
"

local all_options="$options_with_args
--disable-content-trust=false
--help
--interactive -i
--oom-kill-disable
--privileged
--publish-all -P
--read-only
@@ -1053,7 +1112,7 @@ _docker_run() {
[ "$command" = "run" ] && all_options="$all_options
--detach -d
--rm
--sig-proxy
--sig-proxy=false
"

local options_with_args_glob=$(__docker_to_extglob "$options_with_args")
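To ground a few of the newly completed `docker run` flags above (my example, not from the
patch; the image and numeric values are arbitrary):

    docker run -it \
      --memory 512m --memory-swappiness 0 \
      --cpu-period 100000 --cpu-quota 50000 \
      --group-add staff \
      --oom-kill-disable \
      ubuntu bash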
@@ -979,7 +979,7 @@ func getDefaultRouteMtu() (int, error) {
return 0, err
}
for _, r := range routes {
if r.Default {
if r.Default && r.Iface != nil {
return r.Iface.MTU, nil
}
}
@@ -76,6 +76,7 @@ func NewDaemonCli() *DaemonCli {
// TODO(tiborvass): remove InstallFlags?
daemonConfig := new(daemon.Config)
daemonConfig.LogConfig.Config = make(map[string]string)
daemonConfig.InstallFlags(daemonFlags, presentInHelp)
daemonConfig.InstallFlags(flag.CommandLine, absentFromHelp)
registryOptions := new(registry.Options)
@@ -208,10 +209,6 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
}()
}

if cli.LogConfig.Config == nil {
cli.LogConfig.Config = make(map[string]string)
}

serverConfig := &apiserver.ServerConfig{
Logging: true,
EnableCors: cli.EnableCors,
@@ -28,7 +28,7 @@ func main() {
flag.Merge(flag.CommandLine, clientFlags.FlagSet, commonFlags.FlagSet)

flag.Usage = func() {
fmt.Fprint(os.Stdout, "Usage: docker [OPTIONS] COMMAND [arg...]\n"+daemonUsage+" docker [ -h | --help | -v | --version ]\n\n")
fmt.Fprint(os.Stdout, "Usage: docker [OPTIONS] COMMAND [arg...]\n"+daemonUsage+" docker [ --help | -v | --version ]\n\n")
fmt.Fprint(os.Stdout, "A self-sufficient runtime for containers.\n\nOptions:\n")

flag.CommandLine.SetOutput(os.Stdout)
@@ -11,111 +11,7 @@ weight = 7
|
||||
|
||||
# Using certificates for repository client verification
|
||||
|
||||
In [Running Docker with HTTPS](/articles/https), you learned that, by default,
|
||||
Docker runs via a non-networked Unix socket and TLS must be enabled in order
|
||||
to have the Docker client and the daemon communicate securely over HTTPS.
|
||||
|
||||
Now, you will see how to allow the Docker registry (i.e., *a server*) to
|
||||
verify that the Docker daemon (i.e., *a client*) has the right to access the
|
||||
images being hosted with *certificate-based client-server authentication*.
|
||||
|
||||
We will show you how to install a Certificate Authority (CA) root certificate
|
||||
for the registry and how to set the client TLS certificate for verification.
|
||||
|
||||
## Understanding the configuration
|
||||
|
||||
A custom certificate is configured by creating a directory under
|
||||
`/etc/docker/certs.d` using the same name as the registry's hostname (e.g.,
|
||||
`localhost`). All `*.crt` files are added to this directory as CA roots.
|
||||
|
||||
> **Note:**
|
||||
> In the absence of any root certificate authorities, Docker
|
||||
> will use the system default (i.e., host's root CA set).
|
||||
|
||||
The presence of one or more `<filename>.key/cert` pairs indicates to Docker
|
||||
that there are custom certificates required for access to the desired
|
||||
repository.
|
||||
|
||||
> **Note:**
|
||||
> If there are multiple certificates, each will be tried in alphabetical
|
||||
> order. If there is an authentication error (e.g., 403, 404, 5xx, etc.), Docker
|
||||
> will continue to try with the next certificate.
|
||||
|
||||
Our example is set up like this:
|
||||
|
||||
/etc/docker/certs.d/ <-- Certificate directory
|
||||
└── localhost <-- Hostname
|
||||
├── client.cert <-- Client certificate
|
||||
├── client.key <-- Client key
|
||||
└── localhost.crt <-- Registry certificate
|
||||
|
||||
## Creating the client certificates
|
||||
|
||||
You will use OpenSSL's `genrsa` and `req` commands to first generate an RSA
|
||||
key and then use the key to create the certificate.
|
||||
|
||||
$ openssl genrsa -out client.key 4096
|
||||
$ openssl req -new -x509 -text -key client.key -out client.cert
|
||||
|
||||
> **Warning:**
|
||||
> Using TLS and managing a CA is an advanced topic.
|
||||
> You should be familiar with OpenSSL, x509, and TLS before
|
||||
> attempting to use them in production.
|
||||
|
||||
> **Warning:**
|
||||
> These TLS commands will only generate a working set of certificates on Linux.
|
||||
> The version of OpenSSL in Mac OS X is incompatible with the type of
|
||||
> certificate Docker requires.
|
||||
|
||||
## Testing the verification setup
|
||||
|
||||
You can test this setup by using Apache to host a Docker registry.
|
||||
For this purpose, you can copy a registry tree (containing images) inside
|
||||
the Apache root.
|
||||
|
||||
> **Note:**
|
||||
> You can find such an example [here](
|
||||
> http://people.gnome.org/~alexl/v1.tar.gz) - which contains the busybox image.
|
||||
|
||||
Once you set up the registry, you can use the following Apache configuration
|
||||
to implement certificate-based protection.
|
||||
|
||||
# This must be in the root context, otherwise it causes a re-negotiation
|
||||
# which is not supported by the TLS implementation in go
|
||||
SSLVerifyClient optional_no_ca
|
||||
|
||||
<Location /v1>
|
||||
Action cert-protected /cgi-bin/cert.cgi
|
||||
SetHandler cert-protected
|
||||
|
||||
Header set x-docker-registry-version "0.6.2"
|
||||
SetEnvIf Host (.*) custom_host=$1
|
||||
Header set X-Docker-Endpoints "%{custom_host}e"
|
||||
</Location>
|
||||
|
||||
Save the above content as `/etc/httpd/conf.d/registry.conf`, and
|
||||
continue with creating a `cert.cgi` file under `/var/www/cgi-bin/`.
|
||||
|
||||
#!/bin/bash
|
||||
if [ "$HTTPS" != "on" ]; then
|
||||
echo "Status: 403 Not using SSL"
|
||||
echo "x-docker-registry-version: 0.6.2"
|
||||
echo
|
||||
exit 0
|
||||
fi
|
||||
if [ "$SSL_CLIENT_VERIFY" == "NONE" ]; then
|
||||
echo "Status: 403 Client certificate invalid"
|
||||
echo "x-docker-registry-version: 0.6.2"
|
||||
echo
|
||||
exit 0
|
||||
fi
|
||||
echo "Content-length: $(stat --printf='%s' $PATH_TRANSLATED)"
|
||||
echo "x-docker-registry-version: 0.6.2"
|
||||
echo "X-Docker-Endpoints: $SERVER_NAME"
|
||||
echo "X-Docker-Size: 0"
|
||||
echo
|
||||
|
||||
cat $PATH_TRANSLATED
|
||||
|
||||
This CGI script will ensure that all requests to `/v1` *without* a valid
|
||||
certificate will be returned with a `403` (i.e., HTTP forbidden) error.
|
||||
The original content was deprecated. For information about configuring
certificates, see [deploying a registry
server](http://docs.docker.com/registry/deploying/). To reach an older version
of this content, refer to an older version of the documentation.
|
||||
|
||||
@@ -11,81 +11,8 @@ weight = 8
|
||||
|
||||
# Run a local registry mirror
|
||||
|
||||
## Why?
|
||||
|
||||
If you have multiple instances of Docker running in your environment
|
||||
(e.g., multiple physical or virtual machines, all running the Docker
|
||||
daemon), each time one of them requires an image that it doesn't have
|
||||
it will go out to the internet and fetch it from the public Docker
|
||||
registry. By running a local registry mirror, you can keep most of the
|
||||
image fetch traffic on your local network.
|
||||
|
||||
## How does it work?
|
||||
|
||||
The first time you request an image from your local registry mirror,
|
||||
it pulls the image from the public Docker registry and stores it locally
|
||||
before handing it back to you. On subsequent requests, the local registry
|
||||
mirror is able to serve the image from its own storage.
|
||||
|
||||
## How do I set up a local registry mirror?

There are two steps to set up and use a local registry mirror.

### Step 1: Configure your Docker daemons to use the local registry mirror

You will need to pass the `--registry-mirror` option to your Docker daemon on
startup:

docker daemon --registry-mirror=http://<my-docker-mirror-host>

For example, if your mirror is serving on `http://10.0.0.2:5000`, you would run:

docker daemon --registry-mirror=http://10.0.0.2:5000

**NOTE:**
Depending on your local host setup, you may be able to add the
`--registry-mirror` options to the `DOCKER_OPTS` variable in
`/etc/default/docker`.
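For example (my sketch of the note above; the mirror address reuses the example value
from this page), the equivalent `DOCKER_OPTS` entry would be:

    # /etc/default/docker
    DOCKER_OPTS="--registry-mirror=http://10.0.0.2:5000"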
### Step 2: Run the local registry mirror

You will need to start a local registry mirror service. The
[`registry` image](https://registry.hub.docker.com/_/registry/) provides this
functionality. For example, to run a local registry mirror that serves on
port `5000` and mirrors the content at `registry-1.docker.io`:

docker run -p 5000:5000 \
-e STANDALONE=false \
-e MIRROR_SOURCE=https://registry-1.docker.io \
-e MIRROR_SOURCE_INDEX=https://index.docker.io \
registry
|
||||
|
||||
## Test it out
|
||||
|
||||
With your mirror running, pull an image that you haven't pulled before (using
|
||||
`time` to time it):
|
||||
|
||||
$ time docker pull node:latest
|
||||
Pulling repository node
|
||||
[...]
|
||||
|
||||
real 1m14.078s
|
||||
user 0m0.176s
|
||||
sys 0m0.120s
|
||||
|
||||
Now, remove the image from your local machine:
|
||||
|
||||
$ docker rmi node:latest
|
||||
|
||||
Finally, re-pull the image:
|
||||
|
||||
$ time docker pull node:latest
|
||||
Pulling repository node
|
||||
[...]
|
||||
|
||||
real 0m51.376s
|
||||
user 0m0.120s
|
||||
sys 0m0.116s
|
||||
|
||||
The second time around, the local registry mirror served the image from storage,
|
||||
avoiding a trip out to the internet to refetch it.
|
||||
The original content was deprecated. [An archived
version](https://docs.docker.com/v1.6/articles/registry_mirror) is available in
the 1.7 documentation. For information about configuring mirrors with the latest
Docker Registry version, please file a support request with [the Distribution
project](https://github.com/docker/distribution/issues).
|
||||
|
||||
@@ -96,7 +96,7 @@ which is officially supported by Docker.
>command fails for the Docker repo during installation. To work around this,
>add the key directly using the following:
>
> $ wget -qO- https://get.docker.com/gpg | sudo apt-key add -
> $ curl -sSL https://get.docker.com/gpg | sudo apt-key add -

### Uninstallation
@@ -206,6 +206,24 @@ If you need to add an HTTP Proxy, set a different directory or partition for the
Docker runtime files, or make other customizations, read our Systemd article to
learn how to [customize your Systemd Docker daemon options](/articles/systemd/).

## Running Docker with a manually-defined network

If you manually configure your network using `systemd-network` with `systemd` version 219 or higher, containers you start with Docker may be unable to access your network.
Beginning with version 220, the forwarding setting for a given network (`net.ipv4.conf.<interface>.forwarding`) defaults to *off*. This setting prevents IP forwarding. It also conflicts with Docker which enables the `net.ipv4.conf.all.forwarding` setting within a container.

To work around this, edit the `<interface>.network` file in
`/usr/lib/systemd/network/` on your Docker host (for example, `/usr/lib/systemd/network/80-container-host0.network`) and add the following block:

```
[Network]
...
IPForward=kernel
# OR
IPForward=true
...
```

This configuration allows IP forwarding from the container as expected.
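A quick way to confirm the workaround took effect (my addition, not from the original
article; the interface name is an example) is to check the forwarding sysctl after
restarting `systemd-networkd`:

    $ sysctl net.ipv4.conf.eth0.forwarding
    net.ipv4.conf.eth0.forwarding = 1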
## Uninstall
@@ -10,37 +10,34 @@ parent = "smn_engine"
|
||||
|
||||
# Mac OS X
|
||||
|
||||
You can install Docker using Boot2Docker to run `docker` commands at your command-line.
|
||||
Choose this installation if you are familiar with the command-line or plan to
|
||||
contribute to the Docker project on GitHub.
|
||||
> **Note**: This release of Docker deprecates the Boot2Docker command line in
|
||||
> favor of Docker Machine. Use the Docker Toolbox to install Docker Machine as
|
||||
> well as the other Docker tools.
|
||||
|
||||
[<img src="/installation/images/kitematic.png" alt="Download Kitematic"
|
||||
style="float:right;">](https://kitematic.com/download)
|
||||
You install Docker using Docker Toolbox. Docker Toolbox includes the following Docker tools:
|
||||
|
||||
Alternatively, you may want to try <a id="inlinelink" href="https://kitematic.com/"
|
||||
target="_blank">Kitematic</a>, an application that lets you set up Docker and
|
||||
run containers using a graphical user interface (GUI).
|
||||
|
||||
## Command-line Docker with Boot2Docker
|
||||
* Docker Machine for running the `docker-machine` binary
|
||||
* Docker Engine for running the `docker` binary
|
||||
* Docker Compose for running the `docker-compose` binary
|
||||
* Kitematic, the Docker GUI
|
||||
* a shell preconfigured for a Docker command-line environment
|
||||
* Oracle VM VirtualBox
|
||||
|
||||
Because the Docker daemon uses Linux-specific kernel features, you can't run
|
||||
Docker natively in OS X. Instead, you must install the Boot2Docker application.
|
||||
The application includes a VirtualBox Virtual Machine (VM), Docker itself, and the
|
||||
Boot2Docker management tool.
|
||||
|
||||
The Boot2Docker management tool is a lightweight Linux virtual machine made
|
||||
specifically to run the Docker daemon on Mac OS X. The VirtualBox VM runs
|
||||
completely from RAM, is a small ~24MB download, and boots in approximately 5s.
|
||||
Docker natively in OS X. Instead, you must use `docker-machine` to create and
|
||||
attach to a virtual machine (VM). This machine is a Linux VM that hosts Docker
|
||||
for you on your Mac.
|
||||
|
||||
**Requirements**
|
||||
|
||||
Your Mac must be running OS X 10.6 "Snow Leopard" or newer to run Boot2Docker.
|
||||
Your Mac must be running OS X 10.8 "Mountain Lion" or newer to install the
|
||||
Docker Toolbox.
|
||||
|
||||
### Learn the key concepts before installing
|
||||
|
||||
In a Docker installation on Linux, your machine is both the localhost and the
|
||||
Docker host. In networking, localhost means your computer. The Docker host is
|
||||
the machine on which the containers run.
|
||||
In a Docker installation on Linux, your physical machine is both the localhost
|
||||
and the Docker host. In networking, localhost means your computer. The Docker
|
||||
host is the computer on which the containers run.
|
||||
|
||||
On a typical Linux installation, the Docker client, the Docker daemon, and any
|
||||
containers run directly on your localhost. This means you can address ports on a
|
||||
@@ -49,135 +46,243 @@ Docker container using standard localhost addressing such as `localhost:8000` or
|
||||
|
||||

|
||||
|
||||
In an OS X installation, the `docker` daemon is running inside a Linux virtual
|
||||
machine provided by Boot2Docker.
|
||||
In an OS X installation, the `docker` daemon is running inside a Linux VM called
|
||||
`default`. The `default` is a lightweight Linux VM made specifically to run
|
||||
the Docker daemon on Mac OS X. The VM runs completely from RAM, is a small ~24MB
|
||||
download, and boots in approximately 5s.
|
||||
|
||||

|
||||
|
||||
In OS X, the Docker host address is the address of the Linux VM.
|
||||
When you start the `boot2docker` process, the VM is assigned an IP address. Under
|
||||
`boot2docker` ports on a container map to ports on the VM. To see this in
|
||||
In OS X, the Docker host address is the address of the Linux VM. When you start
|
||||
the VM with `docker-machine` it is assigned an IP address. When you start a
|
||||
container, the ports on a container map to ports on the VM. To see this in
|
||||
practice, work through the exercises on this page.
|
||||
|
||||
|
||||
### Installation
|
||||
|
||||
1. Go to the [boot2docker/osx-installer ](
|
||||
https://github.com/boot2docker/osx-installer/releases/latest) release page.
|
||||
If you have VirtualBox running, you must shut it down before running the
|
||||
installer.
|
||||
|
||||
4. Download Boot2Docker by clicking `Boot2Docker-x.x.x.pkg` in the "Downloads"
|
||||
section.
|
||||
1. Go to the [Docker Toolbox](https://www.docker.com/toolbox) page.
|
||||
|
||||
3. Install Boot2Docker by double-clicking the package.
|
||||
2. Click the installer link to download.
|
||||
|
||||
The installer places Boot2Docker and VirtualBox in your "Applications" folder.
|
||||
3. Install Docker Toolbox by double-clicking the package or by right-clicking
|
||||
and choosing "Open" from the pop-up menu.
|
||||
|
||||
The installation places the `docker` and `boot2docker` binaries in your
|
||||
`/usr/local/bin` directory.
|
||||
The installer launches the "Install Docker Toolbox" dialog.
|
||||
|
||||

|
||||
|
||||
4. Press "Continue" to install the toolbox.
|
||||
|
||||
The installer presents you with options to customize the standard
|
||||
installation.
|
||||
|
||||

|
||||
|
||||
By default, the standard Docker Toolbox installation:
|
||||
|
||||
* installs binaries for the Docker tools in `/usr/local/bin`
|
||||
* makes these binaries available to all users
|
||||
* updates any existing VirtualBox installation
|
||||
|
||||
Change these defaults by pressing "Customize" or "Change
|
||||
Install Location."
|
||||
|
||||
5. Press "Install" to perform the standard installation.
|
||||
|
||||
The system prompts you for your password.
|
||||
|
||||

|
||||
|
||||
6. Provide your password to continue with the installation.
|
||||
|
||||
When it completes, the installer provides you with some information you can
|
||||
use to complete some common tasks.
|
||||
|
||||

|
||||
|
||||
7. Press "Close" to exit.
|
||||
|
||||
|
||||
## Start the Boot2Docker Application
|
||||
## Running a Docker Container
|
||||
|
||||
To run a Docker container, you first start the `boot2docker` VM and then issue
|
||||
`docker` commands to create, load, and manage containers. You can launch
|
||||
`boot2docker` from your Applications folder or from the command line.
|
||||
To run a Docker container, you:
|
||||
|
||||
> **NOTE**: Boot2Docker is designed as a development tool. You should not use
|
||||
> it in production environments.
|
||||
* create a new (or start an existing) Docker virtual machine
|
||||
* switch your environment to your new VM
|
||||
* use the `docker` client to create, load, and manage containers
|
||||
|
||||
### From the Applications folder
|
||||
Once you create a machine, you can reuse it as often as you like. Like any
|
||||
VirtualBox VM, it maintains its configuration between uses.
|
||||
|
||||
When you launch the "Boot2Docker" application from your "Applications" folder, the
|
||||
application:
|
||||
There are two ways to use the installed tools, from the Docker Quickstart Terminal or
|
||||
[from your shell](#from-your-shell).
|
||||
|
||||
* opens a terminal window
|
||||
### From the Docker Quickstart Terminal
|
||||
|
||||
* creates a $HOME/.boot2docker directory
|
||||
1. Open the "Applications" folder or the "Launchpad".
|
||||
|
||||
* creates a VirtualBox ISO and certs
|
||||
2. Find the Docker Quickstart Terminal and double-click to launch it.
|
||||
|
||||
* starts a VirtualBox VM running the `docker` daemon
|
||||
The application:
|
||||
|
||||
Once the launch completes, you can run `docker` commands. A good way to verify
|
||||
your setup succeeded is to run the `hello-world` container.
|
||||
* opens a terminal window
|
||||
* creates a VM called `default` if it doesn't exist, starts the VM if it does
|
||||
* points the terminal environment to this VM
|
||||
|
||||
$ docker run hello-world
|
||||
Unable to find image 'hello-world:latest' locally
|
||||
511136ea3c5a: Pull complete
|
||||
31cbccb51277: Pull complete
|
||||
e45a5af57b00: Pull complete
|
||||
hello-world:latest: The image you are pulling has been verified.
|
||||
Important: image verification is a tech preview feature and should not be
|
||||
relied on to provide security.
|
||||
Status: Downloaded newer image for hello-world:latest
|
||||
Hello from Docker.
|
||||
This message shows that your installation appears to be working correctly.
|
||||
Once the launch completes, the Docker Quickstart Terminal reports:
|
||||
|
||||
To generate this message, Docker took the following steps:
|
||||
1. The Docker client contacted the Docker daemon.
|
||||
2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
|
||||
(Assuming it was not already locally available.)
|
||||
3. The Docker daemon created a new container from that image which runs the
|
||||
executable that produces the output you are currently reading.
|
||||
4. The Docker daemon streamed that output to the Docker client, which sent it
|
||||
to your terminal.
|
||||

|
||||
|
||||
To try something more ambitious, you can run an Ubuntu container with:
|
||||
$ docker run -it ubuntu bash
|
||||
Now, you can run `docker` commands.
|
||||
|
||||
For more examples and ideas, visit:
|
||||
http://docs.docker.com/userguide/
|
||||
3. Verify your setup succeeded by running the `hello-world` container.
|
||||
|
||||
$ docker run hello-world
|
||||
Unable to find image 'hello-world:latest' locally
|
||||
511136ea3c5a: Pull complete
|
||||
31cbccb51277: Pull complete
|
||||
e45a5af57b00: Pull complete
|
||||
hello-world:latest: The image you are pulling has been verified.
|
||||
Important: image verification is a tech preview feature and should not be
|
||||
relied on to provide security.
|
||||
Status: Downloaded newer image for hello-world:latest
|
||||
Hello from Docker.
|
||||
This message shows that your installation appears to be working correctly.
|
||||
|
||||
To generate this message, Docker took the following steps:
|
||||
1. The Docker client contacted the Docker daemon.
|
||||
2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
|
||||
(Assuming it was not already locally available.)
|
||||
3. The Docker daemon created a new container from that image which runs the
|
||||
executable that produces the output you are currently reading.
|
||||
4. The Docker daemon streamed that output to the Docker client, which sent it
|
||||
to your terminal.
|
||||
|
||||
To try something more ambitious, you can run an Ubuntu container with:
|
||||
$ docker run -it ubuntu bash
|
||||
|
||||
For more examples and ideas, visit:
|
||||
http://docs.docker.com/userguide/
|
||||
|
||||
|
||||
A more typical way to start and stop `boot2docker` is using the command line.
|
||||
A more typical way to interact with the Docker tools is from your regular shell command line.
|
||||
|
||||
### From your command line
|
||||
### From your shell
|
||||
|
||||
Initialize and run `boot2docker` from the command line, do the following:
|
||||
This section assumes you are running a Bash shell. You may be running a
|
||||
different shell such as C Shell but the commands are the same.
|
||||
|
||||
1. Create a new Boot2Docker VM.
|
||||
1. Create a new Docker VM.
|
||||
|
||||
$ boot2docker init
|
||||
$ docker-machine create --driver virtualbox default
|
||||
Creating VirtualBox VM...
|
||||
Creating SSH key...
|
||||
Starting VirtualBox VM...
|
||||
Starting VM...
|
||||
To see how to connect Docker to this machine, run: docker-machine env default
|
||||
|
||||
This creates a new virtual machine. You only need to run this command once.
|
||||
This creates a new `default` in VirtualBox.
|
||||
|
||||
2. Start the `boot2docker` VM.
|
||||

|
||||
|
||||
$ boot2docker start
|
||||
The command also creates a machine configuration in the
|
||||
`~/.docker/machine/machines/default` directory. You only need to run the
|
||||
`create` command once. Then, you can use `docker-machine` to start, stop,
|
||||
query, and otherwise manage the VM from the command line.
|
||||
|
||||
3. Display the environment variables for the Docker client.
|
||||
2. List your available machines.
|
||||
|
||||
$ boot2docker shellinit
|
||||
Writing /Users/mary/.boot2docker/certs/boot2docker-vm/ca.pem
|
||||
Writing /Users/mary/.boot2docker/certs/boot2docker-vm/cert.pem
|
||||
Writing /Users/mary/.boot2docker/certs/boot2docker-vm/key.pem
|
||||
export DOCKER_HOST=tcp://192.168.59.103:2376
|
||||
export DOCKER_CERT_PATH=/Users/mary/.boot2docker/certs/boot2docker-vm
|
||||
export DOCKER_TLS_VERIFY=1
|
||||
$ docker-machine ls
|
||||
NAME ACTIVE DRIVER STATE URL SWARM
|
||||
default * virtualbox Running tcp://192.168.99.101:2376
|
||||
|
||||
The specific paths and address on your machine will be different.
|
||||
If you have previously installed the deprecated Boot2Docker application or
|
||||
run the Docker Quickstart Terminal, you may have a `dev` VM as well. When you
|
||||
created `default`, the `docker-machine` command provided instructions
|
||||
for learning how to connect the VM.
|
||||
|
||||
4. To set the environment variables in your shell do the following:
|
||||
3. Get the environment commands for your new VM.
|
||||
|
||||
$ eval "$(boot2docker shellinit)"
|
||||
$ docker-machine env default
|
||||
export DOCKER_TLS_VERIFY="1"
|
||||
export DOCKER_HOST="tcp://192.168.99.101:2376"
|
||||
export DOCKER_CERT_PATH="/Users/mary/.docker/machine/machines/default"
|
||||
export DOCKER_MACHINE_NAME="default"
|
||||
# Run this command to configure your shell:
|
||||
# eval "$(docker-machine env default)"
|
||||
|
||||
You can also set them manually by using the `export` commands `boot2docker`
|
||||
returns.
|
||||
4. Connect your shell to the `default` machine.
|
||||
|
||||
$ eval "$(docker-machine env default)"
|
||||
|
||||
5. Run the `hello-world` container to verify your setup.
|
||||
|
||||
$ docker run hello-world
|
||||
|
||||
|
||||
## Basic Boot2Docker exercises
|
||||
## Learn about your Toolbox installation
|
||||
|
||||
At this point, you should have `boot2docker` running and the `docker` client
|
||||
environment initialized. To verify this, run the following commands:
|
||||
Toolbox installs the Docker Engine binary, the `docker` binary, on your system. When you
|
||||
use the Docker Quickstart Terminal or create a `default` manually, Docker
|
||||
Machine updates the `~/.docker/machine/machines/default` folder to your
|
||||
system. This folder contains the configuration for the VM.
|
||||
|
||||
$ boot2docker status
|
||||
$ docker version
|
||||
You can create multiple VMs on your system with Docker Machine. So, you may have
|
||||
more than one VM folder if you have more than one VM. To remove a VM, use the
|
||||
`docker-machine rm <machine-name>` command.
|
||||
|
||||
Work through this section to try some practical container tasks using `boot2docker` VM.
|
||||
## Migrate from Boot2Docker
|
||||
|
||||
If you were using Boot2Docker previously, you have a pre-existing Docker
|
||||
`boot2docker-vm` VM on your local system. To allow Docker Machine to manage
|
||||
this older VM, you can migrate it.
|
||||
|
||||
1. Open a terminal or the Docker CLI on your system.
|
||||
|
||||
2. Type the following command.
|
||||
|
||||
$ docker-machine create -d virtualbox --virtualbox-import-boot2docker-vm boot2docker-vm docker-vm
|
||||
|
||||
3. Use the `docker-machine` command to interact with the migrated VM.
|
||||
|
||||
The `docker-machine` subcommands are slightly different than the `boot2docker`
|
||||
subcommands. The table below lists the equivalent `docker-machine` subcommand
|
||||
and what it does:
|
||||
|
||||
| `boot2docker` | `docker-machine` | `docker-machine` description |
|
||||
|----------------|------------------|----------------------------------------------------------|
|
||||
| init | create | Creates a new docker host. |
|
||||
| up | start | Starts a stopped machine. |
|
||||
| ssh | ssh | Runs a command or interactive ssh session on the machine.|
|
||||
| save | - | Not applicable. |
|
||||
| down | stop | Stops a running machine. |
|
||||
| poweroff | stop | Stops a running machine. |
|
||||
| reset | restart | Restarts a running machine. |
|
||||
| config | inspect | Prints machine configuration details. |
|
||||
| status | ls | Lists all machines and their status. |
|
||||
| info | inspect | Displays a machine's details. |
|
||||
| ip | ip | Displays the machine's ip address. |
|
||||
| shellinit | env | Displays shell commands needed to configure your shell to interact with a machine |
|
||||
| delete | rm | Removes a machine. |
|
||||
| download | - | Not applicable. |
|
||||
| upgrade | upgrade | Upgrades a machine's Docker client to the latest stable release. |
|
||||
|
||||
|
||||
## Example of Docker on Mac OS X
|
||||
|
||||
Work through this section to try some practical container tasks on a VM. At this
|
||||
point, you should have a VM running and be connected to it through your shell.
|
||||
To verify this, run the following commands:
|
||||
|
||||
$ docker-machine ls
|
||||
NAME ACTIVE DRIVER STATE URL SWARM
|
||||
dev * virtualbox Running tcp://192.168.99.100:2376
|
||||
|
||||
The `ACTIVE` machine, in this case `dev`, is the one your environment is pointing to.
|
||||
|
||||
### Access container ports
|
||||
|
||||
@@ -212,11 +317,11 @@ Work through this section to try some practical container tasks using `boot2dock
|
||||
|
||||
This didn't work. The reason it doesn't work is your `DOCKER_HOST` address is
|
||||
not the localhost address (0.0.0.0) but is instead the address of the
|
||||
`boot2docker` VM.
|
||||
your Docker VM.
|
||||
|
||||
5. Get the address of the `boot2docker` VM.
|
||||
5. Get the address of the `dev` VM.
|
||||
|
||||
$ boot2docker ip
|
||||
$ docker-machine ip dev
|
||||
192.168.59.103
|
||||
|
||||
6. Enter the `http://192.168.59.103:49157` address in your browser:
|
||||
@@ -232,7 +337,7 @@ Work through this section to try some practical container tasks using `boot2dock
|
||||
|
||||
### Mount a volume on the container
|
||||
|
||||
When you start `boot2docker`, it automatically shares your `/Users` directory
|
||||
When you start a container it automatically shares your `/Users/username` directory
|
||||
with the VM. You can use this share point to mount directories onto your container.
|
||||
The next exercise demonstrates how to do this.
|
||||
|
||||
@@ -254,7 +359,8 @@ The next exercise demonstrates how to do this.
|
||||
|
||||
5. Start a new `nginx` container and replace the `html` folder with your `site` directory.
|
||||
|
||||
$ docker run -d -P -v $HOME/site:/usr/share/nginx/html --name mysite nginx
|
||||
$ docker run -d -P -v $HOME/site:/usr/share/nginx/html \
|
||||
--name mysite nginx
|
||||
|
||||
6. Get the `mysite` container's port.
|
||||
|
||||
@@ -274,85 +380,53 @@ The next exercise demonstrates how to do this.
|
||||
|
||||

|
||||
|
||||
9. Stop and then remove your running `mysite` container.
|
||||
10. Stop and then remove your running `mysite` container.
|
||||
|
||||
$ docker stop mysite
|
||||
$ docker rm mysite
|
||||
|
||||
## Upgrade Boot2Docker
|
||||
|
||||
If you are running Boot2Docker 1.4.1 or greater, you can upgrade Boot2Docker from
|
||||
the command line. If you are running an older version, you should use the
|
||||
package provided by the `boot2docker` repository.
|
||||
## Upgrade Docker Toolbox
|
||||
|
||||
### From the command line
|
||||
|
||||
To upgrade from 1.4.1 or greater, you can do this:
|
||||
|
||||
1. Open a terminal on your local machine.
|
||||
|
||||
2. Stop the `boot2docker` application.
|
||||
|
||||
$ boot2docker stop
|
||||
|
||||
3. Run the upgrade command.
|
||||
|
||||
$ boot2docker upgrade
|
||||
To upgrade Docker Toolbox, download and re-run [the Docker Toolbox
|
||||
installer](https://docker.com/toolbox/).
|
||||
|
||||
|
||||
### Use the installer
|
||||
## Uninstall Docker Toolbox
|
||||
|
||||
To upgrade any version of Boot2Docker, do this:
|
||||
To uninstall, do the following:
|
||||
|
||||
1. Open a terminal on your local machine.
|
||||
1. List your machines.
|
||||
|
||||
2. Stop the `boot2docker` application.
|
||||
$ docker-machine ls
|
||||
NAME ACTIVE DRIVER STATE URL SWARM
|
||||
dev * virtualbox Running tcp://192.168.99.100:2376
|
||||
my-docker-machine virtualbox Stopped
|
||||
default virtualbox Stopped
|
||||
|
||||
$ boot2docker stop
|
||||
2. Remove each machine.
|
||||
|
||||
3. Go to the [boot2docker/osx-installer ](
|
||||
https://github.com/boot2docker/osx-installer/releases/latest) release page.
|
||||
$ docker-machine rm dev
|
||||
Successfully removed dev
|
||||
|
||||
4. Download Boot2Docker by clicking `Boot2Docker-x.x.x.pkg` in the "Downloads"
|
||||
section.
|
||||
Removing a machine deletes its VM from VirtualBox and from the
|
||||
`~/.docker/machine/machines` directory.
|
||||
|
||||
2. Install Boot2Docker by double-clicking the package.
|
||||
3. Remove the Docker Quickstart Terminal and Kitematic from your "Applications" folder.
|
||||
|
||||
The installer places Boot2Docker in your "Applications" folder.
|
||||
4. Remove the `docker`, `docker-compose`, and `docker-machine` commands from the `/usr/local/bin` folder.
|
||||
|
||||
$ rm /usr/local/bin/docker
|
||||
|
||||
5. Delete the `~/.docker` folder from your system.
|
||||
|
||||
|
||||
## Uninstallation
|
||||
## Learning more
|
||||
|
||||
1. Go to the [boot2docker/osx-installer ](
|
||||
https://github.com/boot2docker/osx-installer/releases/latest) release page.
|
||||
Use `docker-machine help` to list the full command line reference for Docker Machine. For more
|
||||
information about using SSH or SCP to access a VM, see [the Docker Machine
|
||||
documentation](https://docs.docker.com/machine/).
|
||||
|
||||
2. Download the source code by clicking `Source code (zip)` or
|
||||
`Source code (tar.gz)` in the "Downloads" section.
|
||||
|
||||
3. Extract the source code.
|
||||
|
||||
4. Open a terminal on your local machine.
|
||||
|
||||
5. Change to the directory where you extracted the source code:
|
||||
|
||||
$ cd <path to extracted source code>
|
||||
|
||||
6. Make sure the uninstall.sh script is executable:
|
||||
|
||||
$ chmod +x uninstall.sh
|
||||
|
||||
7. Run the uninstall.sh script:
|
||||
|
||||
$ ./uninstall.sh
|
||||
|
||||
|
||||
## Learning more and acknowledgement
|
||||
|
||||
Use `boot2docker help` to list the full command line reference. For more
|
||||
information about using SSH or SCP to access the Boot2Docker VM, see the README
|
||||
at [Boot2Docker repository](https://github.com/boot2docker/boot2docker).
|
||||
|
||||
Thanks to Chris Jones whose [blog](http://viget.com/extend/how-to-use-docker-on-os-x-the-missing-guide)
|
||||
inspired me to redo this page.
|
||||
|
||||
Continue with the [Docker User Guide](/userguide).
|
||||
You can continue with the [Docker User Guide](/userguide). If you are
|
||||
interested in using the Kitematic GUI, see the [Kitematic user
|
||||
guide](/kitematic/userguide/).
|
||||
|
||||
@@ -111,18 +111,18 @@ install Docker using the following:
1. Log into your Ubuntu installation as a user with `sudo` privileges.

2. Verify that you have `wget` installed.
2. Verify that you have `curl` installed.

$ which wget
$ which curl

If `wget` isn't installed, install it after updating your manager:
If `curl` isn't installed, install it after updating your manager:

$ sudo apt-get update
$ sudo apt-get install wget
$ sudo apt-get install curl

3. Get the latest Docker package.

$ wget -qO- https://get.docker.com/ | sh
$ curl -sSL https://get.docker.com/ | sh

The system prompts you for your `sudo` password. Then, it downloads and
installs Docker and its dependencies.
@@ -132,7 +132,7 @@ install Docker using the following:
>command fails for the Docker repo during installation. To work around this,
>add the key directly using the following:
>
> $ wget -qO- https://get.docker.com/gpg | sudo apt-key add -
> $ curl -sSL https://get.docker.com/gpg | sudo apt-key add -

4. Verify `docker` is installed correctly.
@@ -197,9 +197,14 @@ When users run Docker, they may see these messages when working with an image:
WARNING: Your kernel does not support cgroup swap limit. WARNING: Your
kernel does not support swap limit capabilities. Limitation discarded.

To prevent these messages, enable memory and swap accounting on your system. To
enable these on system using GNU GRUB (GNU GRand Unified Bootloader), do the
following.
To prevent these messages, enable memory and swap accounting on your
system. Enabling memory and swap accounting does induce both a memory
overhead and a performance degradation even when Docker is not in
use. The memory overhead is about 1% of the total available
memory. The performance degradation is roughly 10%.

To enable memory and swap on system using GNU GRUB (GNU GRand Unified
Bootloader), do the following:

1. Log into Ubuntu as a user with `sudo` privileges.
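The remaining steps of this procedure (not shown in this hunk) edit the GRUB
configuration; as a sketch of the standard approach, the kernel command line in
`/etc/default/grub` ends up containing:

    GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"

followed by `sudo update-grub` and a reboot.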
@@ -339,9 +344,9 @@ to start the docker daemon on boot
## Upgrade Docker

To install the latest version of Docker with `wget`:
To install the latest version of Docker with `curl`:

$ wget -qO- https://get.docker.com/ | sh
$ curl -sSL https://get.docker.com/ | sh

## Uninstallation
||||
@@ -9,118 +9,315 @@ parent = "smn_engine"
|
||||
<![end-metadata]-->
|
||||
|
||||
# Windows
|
||||
> **Note:**
|
||||
> Docker has been tested on Windows 7 and 8.1; it may also run on older versions.
|
||||
> Your processor needs to support hardware virtualization.
|
||||
|
||||
The Docker Engine uses Linux-specific kernel features, so to run it on Windows
|
||||
we need to use a lightweight virtual machine (VM). You use the **Windows Docker
|
||||
Client** to control the virtualized Docker Engine to build, run, and manage
|
||||
Docker containers.
|
||||
> **Note**: This release of Docker deprecates the Boot2Docker command line in
|
||||
> favor of Docker Machine. Use the Docker Toolbox to install Docker Machine as
|
||||
> well as the other Docker tools.
|
||||
|
||||
To make this process easier, we've designed a helper application called
|
||||
[Boot2Docker](https://github.com/boot2docker/boot2docker) which creates a Linux virtual
|
||||
machine on Windows to run Docker on a Linux operating system.
|
||||
You install Docker using Docker Toolbox. Docker Toolbox includes the following Docker tools:
|
||||
|
||||
Although you will be using Windows Docker client, the docker engine hosting the
|
||||
containers will still be running on Linux. Until the Docker engine for Windows
|
||||
is developed, you can launch only Linux containers from your Windows machine.
|
||||
* Docker Machine for running the `docker-machine` binary
|
||||
* Docker Engine for running the `docker` binary
|
||||
* Kitematic, the Docker GUI
|
||||
* a shell preconfigured for a Docker command-line environment
|
||||
* Oracle VM VirtualBox
|
||||
|
||||
Because the Docker daemon uses Linux-specific kernel features, you can't run
|
||||
Docker natively in Windows. Instead, you must use `docker-machine` to create and attach to a Docker VM on your machine. This VM hosts Docker for you on your Windows system.
|
||||
|
||||
The Docker VM is a lightweight Linux virtual machine made specifically to run the
|
||||
Docker daemon on Windows. The VirtualBox VM runs completely from RAM, is a
|
||||
small ~24MB download, and boots in approximately 5s.
|
||||
|
||||
## Requirements
|
||||
|
||||
Your machine must be running Windows 7.1, 8/8.1 or newer to run Docker. Windows 10 is not currently supported. To find out what version of Windows you have:
|
||||
|
||||
1. Right click the Windows message and choose **System**.
|
||||
|
||||

|
||||
|
||||
If you aren't using a supported version, you could consider upgrading your
|
||||
operating system.
|
||||
|
||||
2. Make sure your Windows system supports Hardware Virtualization Technology and that virtualization is enabled.
|
||||
|
||||
#### For Windows 8 or 8.1
|
||||
|
||||
Choose **Start > Task Manager** and navigate to the **Performance** tab.
|
||||
Under **CPU** you should see the following:
|
||||
|
||||

|
||||
|
||||
If virtualization is not enabled on your system, follow the manufacturer's instructions for enabling it.
|
||||
|
||||
### For Windows 7
|
||||
|
||||
Run the <a
|
||||
href="http://www.microsoft.com/en-us/download/details.aspx?id=592"
|
||||
target="_blank"> Microsoft® Hardware-Assisted Virtualization Detection
|
||||
Tool</a> and follow the on-screen instructions.
|
||||
|
||||
|
||||
> **Note**: If you have Docker hosts running and you don't wish to do a Docker Toolbox
|
||||
installation, you can install the `docker.exe` using the *unofficial* Windows package
|
||||
manager Chocolatey. For information on how to do this, see [Docker package on
|
||||
Chocolatey](http://chocolatey.org/packages/docker).
|
||||
|
||||
### Learn the key concepts before installing
|
||||
|
||||
In a Docker installation on Linux, your machine is both the localhost and the
|
||||
Docker host. In networking, localhost means your computer. The Docker host is
|
||||
the machine on which the containers run.
|
||||
|
||||
On a typical Linux installation, the Docker client, the Docker daemon, and any
|
||||
containers run directly on your localhost. This means you can address ports on a
|
||||
Docker container using standard localhost addressing such as `localhost:8000` or
|
||||
`0.0.0.0:8376`.
|
||||
|
||||

|
||||
|
||||
In a Windows installation, the `docker` daemon is running inside a Linux virtual
|
||||
machine. You use the Windows Docker client to talk to the Docker host VM. Your
|
||||
Docker containers run inside this host.
|
||||
|
||||

|
||||
|
||||
## Demonstration
|
||||
In Windows, the Docker host address is the address of the Linux VM. When you
|
||||
start the VM with `docker-machine`, it is assigned an IP address. When you start
|
||||
a container, the ports on a container map to ports on the VM. To see this in
|
||||
practice, work through the exercises on this page.
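
For example, assuming a machine named `default` and using the `nginx` image purely
as an illustration, you can look up the VM's address and reach a published container
port on that address rather than on `localhost` (the IP shown is only an example of
what `docker-machine ip` might print):

    $ docker-machine ip default
    192.168.99.100
    $ docker run -d -p 8000:80 nginx
    $ curl http://192.168.99.100:8000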
|
||||
|
||||
<iframe width="640" height="480" src="//www.youtube.com/embed/TjMU3bDX4vo?rel=0" frameborder="0" allowfullscreen></iframe>
|
||||
|
||||
## Installation
|
||||
|
||||
|
||||
1. Download the latest release of the
|
||||
[Docker for Windows Installer](https://github.com/boot2docker/windows-installer/releases/latest).
|
||||
2. Run the installer, which will install Docker Client for Windows, VirtualBox,
|
||||
Git for Windows (MSYS-git), the boot2docker Linux ISO, and the Boot2Docker
|
||||
management tool.
|
||||

|
||||
3. Run the **Boot2Docker Start** shortcut from your Desktop or “Program Files →
|
||||
Boot2Docker for Windows”.
|
||||
The Start script will ask you to enter an SSH key passphrase - the simplest
(but least secure) option is to just hit [Enter].
|
||||
If you have VirtualBox running, you must shut it down before running the
|
||||
installer.
|
||||
|
||||
4. The **Boot2Docker Start** shortcut starts a Unix shell already configured to manage
|
||||
Docker running inside the virtual machine. Run `docker version` to see
|
||||
if it is working correctly:
|
||||
1. Go to the [Docker Toolbox](https://www.docker.com/toolbox) page.
|
||||
|
||||

|
||||
2. Click the installer link to download.
|
||||
|
||||
## Running Docker
|
||||
3. Install Docker Toolbox by double-clicking the installer.
|
||||
|
||||
> **Note:** if you are using a remote Docker daemon, such as Boot2Docker,
|
||||
> then _do not_ type the `sudo` before the `docker` commands shown in the
|
||||
> documentation's examples.
|
||||
The installer launches the "Setup - Docker Toolbox" dialog.
|
||||
|
||||
**Boot2Docker Start** will automatically start a shell with environment variables
|
||||
correctly set so you can start using Docker right away:
|
||||

|
||||
|
||||
Let's try the `hello-world` example image. Run
|
||||
4. Press "Next" to install the toolbox.
|
||||
|
||||
$ docker run hello-world
|
||||
The installer presents you with options to customize the standard
|
||||
installation. By default, the standard Docker Toolbox installation:
|
||||
|
||||
* installs executables for the Docker tools in `C:\Program Files\Docker Toolbox`
|
||||
* updates any existing VirtualBox installation
|
||||
* adds a Docker Inc. folder to your program shortcuts
|
||||
* updates your `PATH` environment variable
|
||||
* adds desktop icons for the Docker Quickstart Terminal and Kitematic
|
||||
|
||||
This installation assumes the defaults are acceptable.
|
||||
|
||||
5. Press "Next" until you reach the "Ready to Install" page.
|
||||
|
||||
The system prompts you for your password.
|
||||
|
||||

|
||||
|
||||
6. Press "Install" to continue with the installation.
|
||||
|
||||
When it completes, the installer provides you with some information you can
|
||||
use to complete some common tasks.
|
||||
|
||||

|
||||
|
||||
7. Press "Close" to exit.
|
||||
|
||||
## Running a Docker Container
|
||||
|
||||
To run a Docker container, you:
|
||||
|
||||
* create a new (or start an existing) Docker virtual machine
|
||||
* switch your environment to your new VM
|
||||
* use the `docker` client to create, load, and manage containers
|
||||
|
||||
Once you create a machine, you can reuse it as often as you like. Like any
|
||||
VirtualBox VM, it maintains its configuration between uses.
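
For instance, assuming your machine is named `default` (the name is illustrative),
you might stop it when you are done and start it again later; the VM keeps its
configuration between runs:

    $ docker-machine stop default
    $ docker-machine start default
    $ docker-machine env default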
|
||||
|
||||
There are two ways to use the installed tools: from the Docker Quickstart Terminal or
[from your shell](#from-your-shell).
|
||||
|
||||
### From the Docker Quickstart Terminal
|
||||
|
||||
1. Find the Docker Quickstart Terminal icon on your Desktop and double-click to launch it.
|
||||
|
||||
The application:
|
||||
|
||||
* opens a terminal window
|
||||
* creates a `default` VM if it doesn't exist, or starts the VM if it does
|
||||
* points the terminal environment to this VM
|
||||
|
||||
Once the launch completes, you can run `docker` commands.
|
||||
|
||||
2. Verify your setup succeeded by running the `hello-world` container.
|
||||
|
||||
$ docker run hello-world
|
||||
Unable to find image 'hello-world:latest' locally
|
||||
511136ea3c5a: Pull complete
|
||||
31cbccb51277: Pull complete
|
||||
e45a5af57b00: Pull complete
|
||||
hello-world:latest: The image you are pulling has been verified.
|
||||
Important: image verification is a tech preview feature and should not be
|
||||
relied on to provide security.
|
||||
Status: Downloaded newer image for hello-world:latest
|
||||
Hello from Docker.
|
||||
This message shows that your installation appears to be working correctly.
|
||||
|
||||
To generate this message, Docker took the following steps:
|
||||
1. The Docker client contacted the Docker daemon.
|
||||
2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
|
||||
(Assuming it was not already locally available.)
|
||||
3. The Docker daemon created a new container from that image which runs the
|
||||
executable that produces the output you are currently reading.
|
||||
4. The Docker daemon streamed that output to the Docker client, which sent it
|
||||
to your terminal.
|
||||
|
||||
To try something more ambitious, you can run an Ubuntu container with:
|
||||
$ docker run -it ubuntu bash
|
||||
|
||||
For more examples and ideas, visit:
|
||||
http://docs.docker.com/userguide/
|
||||
|
||||
This should download the very small `hello-world` image and print a
|
||||
`Hello from Docker.` message.
|
||||
|
||||
## Using Docker from Windows Command Line Prompt (cmd.exe)
|
||||
|
||||
Launch a Windows Command Line Prompt (cmd.exe).
|
||||
1. Launch a Windows Command Line Prompt (cmd.exe).
|
||||
|
||||
The Boot2Docker command requires `ssh.exe` to be in the PATH; therefore, we need to
add the `bin` folder of the Git installation (which contains `ssh.exe`) to the `%PATH%`
environment variable by running:
|
||||
The `docker-machine` command requires `ssh.exe` in your `PATH` environment
|
||||
variable. This `.exe` is in the MsysGit `bin` folder.
|
||||
|
||||
set PATH=%PATH%;"c:\Program Files (x86)\Git\bin"
|
||||
2. Add this to the `%PATH%` environment variable by running:
|
||||
|
||||
and then we can run the `boot2docker start` command to start the Boot2Docker VM.
(Run the `boot2docker init` command if you get an error saying the machine does not
exist.) Then copy the instructions for cmd.exe to set the environment variables
in your console window, and you are ready to run Docker commands such as
`docker ps`:
|
||||
set PATH=%PATH%;"c:\Program Files (x86)\Git\bin"
|
||||
|
||||

|
||||
3. Create a new Docker VM.
|
||||
|
||||
docker-machine create --driver virtualbox my-default
|
||||
Creating VirtualBox VM...
|
||||
Creating SSH key...
|
||||
Starting VirtualBox VM...
|
||||
Starting VM...
|
||||
To see how to connect Docker to this machine, run: docker-machine env my-default
|
||||
|
||||
The command also creates a machine configuration in the
|
||||
`C:\USERS\USERNAME\.docker\machine\machines` directory. You only need to run the `create`
|
||||
command once. Then, you can use `docker-machine` to start, stop, query, and
|
||||
otherwise manage the VM from the command line.
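
    For example (using the `my-default` machine created above), a few of the
    management subcommands look like this; the output shown is illustrative:

        C:\Users\mary> docker-machine status my-default
        Running
        C:\Users\mary> docker-machine ip my-default
        192.168.99.101
        C:\Users\mary> docker-machine stop my-default
        C:\Users\mary> docker-machine start my-default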
|
||||
|
||||
4. List your available machines.
|
||||
|
||||
C:\Users\mary> docker-machine ls
|
||||
NAME ACTIVE DRIVER STATE URL SWARM
|
||||
my-default * virtualbox Running tcp://192.168.99.101:2376
|
||||
|
||||
If you have previously installed the deprecated Boot2Docker application or
|
||||
run the Docker Quickstart Terminal, you may have a `dev` VM as well.
|
||||
|
||||
5. Get the environment commands for your new VM.
|
||||
|
||||
C:\Users\mary> docker-machine env --shell cmd my-default
|
||||
|
||||
6. Connect your shell to the `my-default` machine.
|
||||
|
||||
C:\Users\mary> FOR /f "tokens=*" %i IN ('docker-machine env --shell cmd my-default') DO %i
|
||||
|
||||
7. Run the `hello-world` container to verify your setup.
|
||||
|
||||
C:\Users\mary> docker run hello-world
|
||||
|
||||
## Using Docker from PowerShell
|
||||
|
||||
Launch a PowerShell window, then add `ssh.exe` to your PATH:
|
||||
1. Launch a Windows PowerShell window.
|
||||
|
||||
$Env:Path = "${Env:Path};c:\Program Files (x86)\Git\bin"
|
||||
2. Add `ssh.exe` to your PATH:
|
||||
|
||||
After running the `boot2docker start` command, it will print PowerShell
commands that set the environment variables needed to connect to the Docker daemon
running inside the VM. Run these commands and you are ready to run Docker
commands such as `docker ps`:
|
||||
PS C:\Users\mary> $Env:Path = "${Env:Path};c:\Program Files (x86)\Git\bin"
|
||||
|
||||

|
||||
3. Create a new Docker VM.
|
||||
|
||||
> NOTE: Alternatively, you can run the `boot2docker shellinit | Invoke-Expression`
> command to set the environment variables instead of copying and pasting them
> into PowerShell.
|
||||
PS C:\Users\mary> docker-machine create --driver virtualbox my-default
|
||||
|
||||
# Further Details
|
||||
4. List your available machines.
|
||||
|
||||
The Boot2Docker management tool provides several commands:
|
||||
C:\Users\mary> docker-machine ls
|
||||
NAME ACTIVE DRIVER STATE URL SWARM
|
||||
my-default * virtualbox Running tcp://192.168.99.101:2376
|
||||
|
||||
$ boot2docker
|
||||
Usage: boot2docker.exe [<options>] {help|init|up|ssh|save|down|poweroff|reset|restart|config|status|info|ip|shellinit|delete|download|upgrade|version} [<args>]
|
||||
5. Get the environment commands for your new VM.
|
||||
|
||||
## Upgrading
|
||||
PS C:\Users\mary> docker-machine env --shell powershell my-default
|
||||
|
||||
1. Download the latest release of the [Docker for Windows Installer](
|
||||
https://github.com/boot2docker/windows-installer/releases/latest)
|
||||
6. Connect your shell to the `my-default` machine.
|
||||
|
||||
2. Run the installer, which will update the Boot2Docker management tool.
|
||||
PS C:\Users\mary> docker-machine env --shell powershell my-default | Invoke-Expression
|
||||
|
||||
3. To upgrade your existing virtual machine, open a terminal and run:
|
||||
7. Run the `hello-world` container to verify your setup.
|
||||
|
||||
boot2docker stop
|
||||
boot2docker download
|
||||
boot2docker start
|
||||
C:\Users\mary> docker run hello-world
|
||||
|
||||
|
||||
## Learn about your Toolbox installation
|
||||
|
||||
Toolbox installs the Docker Engine binary in the `C:\Program Files\Docker
Toolbox` directory. When you use the Docker Quickstart Terminal or create a
`default` VM manually, Docker Machine adds the
`C:\USERS\USERNAME\.docker\machine\machines\default` folder to your
system. This folder contains the configuration for the VM.
|
||||
|
||||
You can create multiple VMs on your system with Docker Machine. So, you may have
|
||||
more than one VM folder if you have more than one VM. To remove a VM, use the
|
||||
`docker-machine rm <machine-name>` command.
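
As a minimal sketch, assuming a second machine named `staging` purely for
illustration, creating, listing, and removing VMs looks like this:

    $ docker-machine create --driver virtualbox staging
    $ docker-machine ls
    $ docker-machine rm staging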
|
||||
|
||||
## Migrate from Boot2Docker
|
||||
|
||||
If you were using Boot2Docker previously, you have a pre-existing Docker
|
||||
`boot2docker-vm` VM on your local system. To allow Docker Machine to manage
|
||||
this older VM, you can migrate it.
|
||||
|
||||
1. Open a terminal or the Docker CLI on your system.
|
||||
|
||||
2. Type the following command.
|
||||
|
||||
$ docker-machine create -d virtualbox --virtualbox-import-boot2docker-vm boot2docker-vm docker-vm
|
||||
|
||||
3. Use the `docker-machine` command to interact with the migrated VM.
|
||||
|
||||
The `docker-machine` subcommands are slightly different than the `boot2docker`
|
||||
subcommands. The table below lists the equivalent `docker-machine` subcommand
|
||||
and what it does:
|
||||
|
||||
| `boot2docker` | `docker-machine` | `docker-machine` description |
|----------------|------------------|----------------------------------------------------------|
| init | create | Creates a new docker host. |
| up | start | Starts a stopped machine. |
| ssh | ssh | Runs a command or interactive ssh session on the machine. |
| save | - | Not applicable. |
| down | stop | Stops a running machine. |
| poweroff | stop | Stops a running machine. |
| reset | restart | Restarts a running machine. |
| config | inspect | Prints machine configuration details. |
| status | ls | Lists all machines and their status. |
| info | inspect | Displays a machine's details. |
| ip | ip | Displays the machine's ip address. |
| shellinit | env | Displays shell commands needed to configure your shell to interact with a machine. |
| delete | rm | Removes a machine. |
| download | - | Not applicable. |
| upgrade | upgrade | Upgrades a machine's Docker client to the latest stable release. |
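
For example, where you previously ran `boot2docker ip` or `boot2docker stop`, you
would now run the equivalent `docker-machine` subcommands against the migrated VM
(here assumed to be named `docker-vm`, as in the migration command above):

    $ docker-machine ip docker-vm
    $ docker-machine stop docker-vm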
|
||||
|
||||
|
||||
## Upgrade Docker Toolbox
|
||||
|
||||
To upgrade Docker Toolbox, download and re-run [the Docker Toolbox
|
||||
installer](https://www.docker.com/toolbox).
|
||||
|
||||
## Container port redirection
|
||||
|
||||
@@ -157,13 +354,12 @@ uses. You can do this with
|
||||
|
||||
## Uninstallation
|
||||
|
||||
You can uninstall Boot2Docker using the standard Windows process for removing programs.
|
||||
This process does not remove the `docker-install.exe` file. You must delete that file
|
||||
yourself.
|
||||
You can uninstall Docker Toolbox using the standard Windows process for removing
|
||||
programs. This process does not remove the `docker-install.exe` file. You must
|
||||
delete that file yourself.
|
||||
|
||||
## References
|
||||
## Learn more
|
||||
|
||||
If you have Docker hosts running and you don't wish to do a
Boot2Docker installation, you can install the `docker.exe` using the
unofficial Windows package manager Chocolatey. For information
|
||||
on how to do this, see [Docker package on Chocolatey](http://chocolatey.org/packages/docker).
|
||||
You can continue with the [Docker User Guide](/userguide). If you are
|
||||
interested in using the Kitematic GUI, see the [Kitematic user
guide](/kitematic/userguide/).
|
||||
|
||||
@@ -116,11 +116,11 @@ images, or you can download Docker images that other people have already created
|
||||
Docker images are the **build** component of Docker.
|
||||
|
||||
#### Docker registries
|
||||
Docker registries hold images. These are public or private stores from which you upload
|
||||
or download images. The public Docker registry is called
|
||||
[Docker Hub](http://hub.docker.com). It provides a huge collection of existing
|
||||
images for your use. These can be images you create yourself or you
|
||||
can use images that others have previously created. Docker registries are the
|
||||
Docker registries hold images. These are public or private stores from which you
|
||||
upload or download images. The public Docker registry is provided with the
|
||||
[Docker Hub](http://hub.docker.com). It serves a huge collection of existing
|
||||
images for your use. These can be images you create yourself or you can use
|
||||
images that others have previously created. Docker registries are the
|
||||
**distribution** component of Docker.
|
||||
|
||||
#### Docker containers
|
||||
@@ -179,8 +179,9 @@ returns a final image.
|
||||
|
||||
### How does a Docker registry work?
|
||||
The Docker registry is the store for your Docker images. Once you build a Docker
|
||||
image you can *push* it to a public registry [Docker Hub](https://hub.docker.com) or to
|
||||
your own registry running behind your firewall.
|
||||
image you can *push* it to a public registry such as the one provided by [Docker
|
||||
Hub](https://hub.docker.com) or to your own registry running behind your
|
||||
firewall.
|
||||
|
||||
Using the Docker client, you can search for already published images and then
|
||||
pull them down to your Docker host to build containers from them.
|
||||
|
||||
@@ -97,7 +97,7 @@ with several powerful functionalities:
|
||||
applications. Your ideal Postgresql setup can be re-used for all your future
|
||||
projects. And so on.
|
||||
|
||||
- *Sharing.* Docker has access to a [public registry](https://hub.docker.com)
|
||||
- *Sharing.* Docker has access to a public registry [on Docker Hub](https://registry.hub.docker.com/)
|
||||
where thousands of people have uploaded useful containers: anything from Redis,
|
||||
CouchDB, Postgres to IRC bouncers to Rails app servers to Hadoop to base images
|
||||
for various Linux distros. The
|
||||
|
||||
@@ -10,502 +10,5 @@ parent = "smn_remoteapi"
|
||||
|
||||
# Docker Hub API
|
||||
|
||||
- This is the REST API for [Docker Hub](https://hub.docker.com).
|
||||
- Authorization is done with basic auth over SSL
|
||||
- Not all commands require authentication, only those noted as such.
|
||||
This API is deprecated as of 1.7. To view the old version, see the [Docker Hub API](https://docs.docker.com/v1.7/reference/api/docker-io_api/) in the 1.7 documentation.
|
||||
|
||||
# Repositories
|
||||
|
||||
## User repository
|
||||
|
||||
### Create a user repository
|
||||
|
||||
`PUT /v1/repositories/(namespace)/(repo_name)/`
|
||||
|
||||
Create a user repository with the given `namespace` and `repo_name`.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
PUT /v1/repositories/foo/bar/ HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
X-Docker-Token: true
|
||||
|
||||
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"}]
|
||||
|
||||
Parameters:
|
||||
|
||||
- **namespace** – the namespace for the repo
|
||||
- **repo_name** – the name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
WWW-Authenticate: Token signature=123abc,repository="foo/bar",access=write
|
||||
X-Docker-Token: signature=123abc,repository="foo/bar",access=write
|
||||
X-Docker-Endpoints: registry-1.docker.io [, registry-2.docker.io]
|
||||
|
||||
""
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **200** – Created
|
||||
- **400** – Errors (invalid json, missing or invalid fields, etc)
|
||||
- **401** – Unauthorized
|
||||
- **403** – Account is not Active
|
||||
|
||||
### Delete a user repository
|
||||
|
||||
`DELETE /v1/repositories/(namespace)/(repo_name)/`
|
||||
|
||||
Delete a user repository with the given `namespace` and `repo_name`.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
DELETE /v1/repositories/foo/bar/ HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
X-Docker-Token: true
|
||||
|
||||
""
|
||||
|
||||
Parameters:
|
||||
|
||||
- **namespace** – the namespace for the repo
|
||||
- **repo_name** – the name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 202
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
WWW-Authenticate: Token signature=123abc,repository="foo/bar",access=delete
|
||||
X-Docker-Token: signature=123abc,repository="foo/bar",access=delete
|
||||
X-Docker-Endpoints: registry-1.docker.io [, registry-2.docker.io]
|
||||
|
||||
""
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **200** – Deleted
|
||||
- **202** – Accepted
|
||||
- **400** – Errors (invalid json, missing or invalid fields, etc)
|
||||
- **401** – Unauthorized
|
||||
- **403** – Account is not Active
|
||||
|
||||
## Library repository
|
||||
|
||||
### Create a library repository
|
||||
|
||||
`PUT /v1/repositories/(repo_name)/`
|
||||
|
||||
Create a library repository with the given `repo_name`.
|
||||
This is a restricted feature only available to docker admins.
|
||||
|
||||
> When namespace is missing, it is assumed to be `library`
|
||||
|
||||
|
||||
**Example Request**:
|
||||
|
||||
PUT /v1/repositories/foobar/ HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
X-Docker-Token: true
|
||||
|
||||
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"}]
|
||||
|
||||
Parameters:
|
||||
|
||||
- **repo_name** – the library name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
WWW-Authenticate: Token signature=123abc,repository="library/foobar",access=write
|
||||
X-Docker-Token: signature=123abc,repository="foo/bar",access=write
|
||||
X-Docker-Endpoints: registry-1.docker.io [, registry-2.docker.io]
|
||||
|
||||
""
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **200** – Created
|
||||
- **400** – Errors (invalid json, missing or invalid fields, etc)
|
||||
- **401** – Unauthorized
|
||||
- **403** – Account is not Active
|
||||
|
||||
### Delete a library repository
|
||||
|
||||
`DELETE /v1/repositories/(repo_name)/`
|
||||
|
||||
Delete a library repository with the given `repo_name`.
|
||||
This is a restricted feature only available to docker admins.
|
||||
|
||||
> When namespace is missing, it is assumed to be `library`
|
||||
|
||||
|
||||
**Example Request**:
|
||||
|
||||
DELETE /v1/repositories/foobar/ HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
X-Docker-Token: true
|
||||
|
||||
""
|
||||
|
||||
Parameters:
|
||||
|
||||
- **repo_name** – the library name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 202
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
WWW-Authenticate: Token signature=123abc,repository="library/foobar",access=delete
|
||||
X-Docker-Token: signature=123abc,repository="foo/bar",access=delete
|
||||
X-Docker-Endpoints: registry-1.docker.io [, registry-2.docker.io]
|
||||
|
||||
""
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **200** – Deleted
|
||||
- **202** – Accepted
|
||||
- **400** – Errors (invalid json, missing or invalid fields, etc)
|
||||
- **401** – Unauthorized
|
||||
- **403** – Account is not Active
|
||||
|
||||
# Repository images
|
||||
|
||||
## User repository images
|
||||
|
||||
### Update user repository images
|
||||
|
||||
`PUT /v1/repositories/(namespace)/(repo_name)/images`
|
||||
|
||||
Update the images for a user repo.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
PUT /v1/repositories/foo/bar/images HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
|
||||
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
|
||||
"checksum": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"}]
|
||||
|
||||
Parameters:
|
||||
|
||||
- **namespace** – the namespace for the repo
|
||||
- **repo_name** – the name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 204
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
""
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **204** – Created
|
||||
- **400** – Errors (invalid json, missing or invalid fields, etc)
|
||||
- **401** – Unauthorized
|
||||
- **403** – Account is not Active or permission denied
|
||||
|
||||
### List user repository images
|
||||
|
||||
`GET /v1/repositories/(namespace)/(repo_name)/images`
|
||||
|
||||
Get the images for a user repo.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
GET /v1/repositories/foo/bar/images HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
|
||||
Parameters:
|
||||
|
||||
- **namespace** – the namespace for the repo
|
||||
- **repo_name** – the name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
|
||||
"checksum": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"},
|
||||
{"id": "ertwetewtwe38722009fe6857087b486531f9a779a0c1dfddgfgsdgdsgds",
|
||||
"checksum": "34t23f23fc17e3ed29dae8f12c4f9e89cc6f0bsdfgfsdgdsgdsgerwgew"}]
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **200** – OK
|
||||
- **404** – Not found
|
||||
|
||||
## Library repository images
|
||||
|
||||
### Update library repository images
|
||||
|
||||
`PUT /v1/repositories/(repo_name)/images`
|
||||
|
||||
Update the images for a library repo.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
PUT /v1/repositories/foobar/images HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
|
||||
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
|
||||
"checksum": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"}]
|
||||
|
||||
Parameters:
|
||||
|
||||
- **repo_name** – the library name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 204
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
""
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **204** – Created
|
||||
- **400** – Errors (invalid json, missing or invalid fields, etc)
|
||||
- **401** – Unauthorized
|
||||
- **403** – Account is not Active or permission denied
|
||||
|
||||
### List library repository images
|
||||
|
||||
`GET /v1/repositories/(repo_name)/images`
|
||||
|
||||
Get the images for a library repo.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
GET /v1/repositories/foobar/images HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
|
||||
Parameters:
|
||||
|
||||
- **repo_name** – the library name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
[{"id": "9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
|
||||
"checksum": "b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"},
|
||||
{"id": "ertwetewtwe38722009fe6857087b486531f9a779a0c1dfddgfgsdgdsgds",
|
||||
"checksum": "34t23f23fc17e3ed29dae8f12c4f9e89cc6f0bsdfgfsdgdsgdsgerwgew"}]
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **200** – OK
|
||||
- **404** – Not found
|
||||
|
||||
# Repository authorization
|
||||
|
||||
## Library repository
|
||||
|
||||
### Authorize a token for a library
|
||||
|
||||
`PUT /v1/repositories/(repo_name)/auth`
|
||||
|
||||
Authorize a token for a library repo
|
||||
|
||||
**Example Request**:
|
||||
|
||||
PUT /v1/repositories/foobar/auth HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Authorization: Token signature=123abc,repository="library/foobar",access=write
|
||||
|
||||
Parameters:
|
||||
|
||||
- **repo_name** – the library name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
"OK"
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **200** – OK
|
||||
- **403** – Permission denied
|
||||
- **404** – Not found
|
||||
|
||||
## User repository
|
||||
|
||||
### Authorize a token for a user repository
|
||||
|
||||
`PUT /v1/repositories/(namespace)/(repo_name)/auth`
|
||||
|
||||
Authorize a token for a user repo
|
||||
|
||||
**Example Request**:
|
||||
|
||||
PUT /v1/repositories/foo/bar/auth HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Authorization: Token signature=123abc,repository="foo/bar",access=write
|
||||
|
||||
Parameters:
|
||||
|
||||
- **namespace** – the namespace for the repo
|
||||
- **repo_name** – the name for the repo
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 200
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
"OK"
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **200** – OK
|
||||
- **403** – Permission denied
|
||||
- **404** – Not found
|
||||
|
||||
## Users
|
||||
|
||||
### User login
|
||||
|
||||
`GET /v1/users/`
|
||||
|
||||
If you want to check your login, you can try this endpoint
|
||||
|
||||
**Example Request**:
|
||||
|
||||
GET /v1/users/ HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
OK
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **200** – no error
|
||||
- **401** – Unauthorized
|
||||
- **403** – Account is not Active
|
||||
|
||||
### User register
|
||||
|
||||
`POST /v1/users/`
|
||||
|
||||
Registering a new account.
|
||||
|
||||
**Example request**:
|
||||
|
||||
POST /v1/users/ HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
|
||||
{"email": "sam@docker.com",
|
||||
"password": "toto42",
|
||||
"username": "foobar"}
|
||||
|
||||
Json Parameters:
|
||||
|
||||
- **email** – valid email address that needs to be confirmed
- **username** – min 4 characters, max 30 characters, must match
  the regular expression [a-z0-9_].
- **password** – min 5 characters
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 201 OK
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
"User Created"
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **201** – User Created
|
||||
- **400** – Errors (invalid json, missing or invalid fields, etc)
|
||||
|
||||
### Update user
|
||||
|
||||
`PUT /v1/users/(username)/`
|
||||
|
||||
Change a password or email address for a given user. If you pass in an
email, it will be added to your account; it will not remove the old
one. Passwords will be updated.

It is up to the client to verify that the password that is sent is
the one the user intended. A common approach is to have them type it
twice.
|
||||
|
||||
**Example Request**:
|
||||
|
||||
PUT /v1/users/fakeuser/ HTTP/1.1
|
||||
Host: index.docker.io
|
||||
Accept: application/json
|
||||
Content-Type: application/json
|
||||
Authorization: Basic akmklmasadalkm==
|
||||
|
||||
{"email": "sam@docker.com",
|
||||
"password": "toto42"}
|
||||
|
||||
Parameters:
|
||||
|
||||
- **username** – username for the person you want to update
|
||||
|
||||
**Example Response**:
|
||||
|
||||
HTTP/1.1 204
|
||||
Vary: Accept
|
||||
Content-Type: application/json
|
||||
|
||||
""
|
||||
|
||||
Status Codes:
|
||||
|
||||
- **204** – User Updated
|
||||
- **400** – Errors (invalid json, missing or invalid fields, etc)
|
||||
- **401** – Unauthorized
|
||||
- **403** – Account is not Active
|
||||
- **404** – User not found
|
||||
|
||||
@@ -298,429 +298,4 @@ The `fromImage` and `repo` parameters now supports the `repo:tag` format.
|
||||
Consequently, the `tag` parameter is now obsolete. Using the new format and
|
||||
the `tag` parameter at the same time will return an error.
|
||||
|
||||
## v1.13
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.13*](/reference/api/docker_remote_api_v1.13/)
|
||||
|
||||
### What's new
|
||||
|
||||
`GET /containers/(name)/json`
|
||||
|
||||
**New!**
|
||||
The `HostConfig.Links` field is now filled correctly
|
||||
|
||||
**New!**
|
||||
`Sockets` parameter added to the `/info` endpoint listing all the sockets the
|
||||
daemon is configured to listen on.
|
||||
|
||||
`POST /containers/(name)/start`
|
||||
`POST /containers/(name)/stop`
|
||||
|
||||
**New!**
|
||||
`start` and `stop` will now return 304 if the container's status is not modified
|
||||
|
||||
`POST /commit`
|
||||
|
||||
**New!**
|
||||
Added a `pause` parameter (default `true`) to pause the container during commit
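
As a hedged sketch, assuming the daemon's Remote API is reachable over TCP at
`localhost:2375` (by default it listens on a Unix socket instead), committing
without pausing a container named `mycontainer` could look like:

    $ curl -X POST "http://localhost:2375/commit?container=mycontainer&repo=myrepo&pause=false"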
|
||||
|
||||
## v1.12
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.12*](/reference/api/docker_remote_api_v1.12/)
|
||||
|
||||
### What's new
|
||||
|
||||
`POST /build`
|
||||
|
||||
**New!**
|
||||
Build now has support for the `forcerm` parameter to always remove containers
|
||||
|
||||
`GET /containers/(name)/json`
|
||||
`GET /images/(name)/json`
|
||||
|
||||
**New!**
|
||||
All the JSON keys are now in CamelCase
|
||||
|
||||
**New!**
|
||||
Trusted builds are now Automated Builds - `is_trusted` is now `is_automated`.
|
||||
|
||||
**Removed Insert Endpoint**
|
||||
The `insert` endpoint has been removed.
|
||||
|
||||
## v1.11
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.11*](/reference/api/docker_remote_api_v1.11/)
|
||||
|
||||
### What's new
|
||||
|
||||
`GET /_ping`
|
||||
|
||||
**New!**
|
||||
You can now ping the server via the `_ping` endpoint.
|
||||
|
||||
`GET /events`
|
||||
|
||||
**New!**
|
||||
You can now use the `-until` parameter to close the connection
after a given timestamp.
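
A minimal sketch, under the same `localhost:2375` assumption as the earlier
example (the timestamp value is arbitrary):

    $ curl "http://localhost:2375/events?until=1374067924"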
|
||||
|
||||
`GET /containers/(id)/logs`
|
||||
|
||||
This URL is now the preferred method for getting container logs.
|
||||
|
||||
## v1.10
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.10*](/reference/api/docker_remote_api_v1.10/)
|
||||
|
||||
### What's new
|
||||
|
||||
`DELETE /images/(name)`
|
||||
|
||||
**New!**
|
||||
You can now use the force parameter to force the deletion of an
image, even if it's tagged in multiple repositories.

**New!**
You can now use the noprune parameter to prevent the deletion of parent
images.
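
A corresponding sketch, with the same `localhost:2375` assumption and a
placeholder image name:

    $ curl -X DELETE "http://localhost:2375/images/myimage?force=1&noprune=1"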
|
||||
|
||||
`DELETE /containers/(id)`
|
||||
|
||||
**New!**
|
||||
You can now use the force parameter to force the deletion of a
container, even if it is currently running
|
||||
|
||||
## v1.9
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.9*](/reference/api/docker_remote_api_v1.9/)
|
||||
|
||||
### What's new
|
||||
|
||||
`POST /build`
|
||||
|
||||
**New!**
|
||||
This endpoint now takes a serialized ConfigFile which it
|
||||
uses to resolve the proper registry auth credentials for pulling the
|
||||
base image. Clients which previously implemented the version
|
||||
accepting an AuthConfig object must be updated.
|
||||
|
||||
## v1.8
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.8*](/reference/api/docker_remote_api_v1.8/)
|
||||
|
||||
### What's new
|
||||
|
||||
`POST /build`
|
||||
|
||||
**New!**
|
||||
This endpoint now returns the build status as a JSON stream. In
|
||||
case of a build error, it returns the exit status of the failed
|
||||
command.
|
||||
|
||||
`GET /containers/(id)/json`
|
||||
|
||||
**New!**
|
||||
This endpoint now returns the host config for the
|
||||
container.
|
||||
|
||||
`POST /images/create`
|
||||
|
||||
`POST /images/(name)/insert`
|
||||
|
||||
`POST /images/(name)/push`
|
||||
|
||||
**New!**
|
||||
A progressDetail object was added to the JSON output. It's now
|
||||
possible to get the current value and the total of the progress
|
||||
without having to parse the string.
|
||||
|
||||
## v1.7
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.7*](/reference/api/docker_remote_api_v1.7/)
|
||||
|
||||
### What's new
|
||||
|
||||
`GET /images/json`
|
||||
|
||||
The format of the JSON returned from this URI has changed. Instead of an
|
||||
entry for each repo/tag on an image, each image is only represented
|
||||
once, with a nested attribute indicating the repo/tags that apply to
|
||||
that image.
|
||||
|
||||
Instead of:
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
[
|
||||
{
|
||||
"VirtualSize": 131506275,
|
||||
"Size": 131506275,
|
||||
"Created": 1365714795,
|
||||
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
|
||||
"Tag": "12.04",
|
||||
"Repository": "ubuntu"
|
||||
},
|
||||
{
|
||||
"VirtualSize": 131506275,
|
||||
"Size": 131506275,
|
||||
"Created": 1365714795,
|
||||
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
|
||||
"Tag": "latest",
|
||||
"Repository": "ubuntu"
|
||||
},
|
||||
{
|
||||
"VirtualSize": 131506275,
|
||||
"Size": 131506275,
|
||||
"Created": 1365714795,
|
||||
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
|
||||
"Tag": "precise",
|
||||
"Repository": "ubuntu"
|
||||
},
|
||||
{
|
||||
"VirtualSize": 180116135,
|
||||
"Size": 24653,
|
||||
"Created": 1364102658,
|
||||
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
|
||||
"Tag": "12.10",
|
||||
"Repository": "ubuntu"
|
||||
},
|
||||
{
|
||||
"VirtualSize": 180116135,
|
||||
"Size": 24653,
|
||||
"Created": 1364102658,
|
||||
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
|
||||
"Tag": "quantal",
|
||||
"Repository": "ubuntu"
|
||||
}
|
||||
]
|
||||
|
||||
The returned json looks like this:
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
[
|
||||
{
|
||||
"RepoTags": [
|
||||
"ubuntu:12.04",
|
||||
"ubuntu:precise",
|
||||
"ubuntu:latest"
|
||||
],
|
||||
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
|
||||
"Created": 1365714795,
|
||||
"Size": 131506275,
|
||||
"VirtualSize": 131506275
|
||||
},
|
||||
{
|
||||
"RepoTags": [
|
||||
"ubuntu:12.10",
|
||||
"ubuntu:quantal"
|
||||
],
|
||||
"ParentId": "27cf784147099545",
|
||||
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
|
||||
"Created": 1364102658,
|
||||
"Size": 24653,
|
||||
"VirtualSize": 180116135
|
||||
}
|
||||
]
|
||||
|
||||
`GET /images/viz`
|
||||
|
||||
This URI no longer exists. The `images --viz`
|
||||
output is now generated in the client, using the
|
||||
`/images/json` data.
|
||||
|
||||
## v1.6
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.6*](/reference/api/docker_remote_api_v1.6/)
|
||||
|
||||
### What's new
|
||||
|
||||
`POST /containers/(id)/attach`
|
||||
|
||||
**New!**
|
||||
You can now split stderr from stdout. This is done by
|
||||
prefixing a header to each transmission. See
|
||||
[`POST /containers/(id)/attach`](
|
||||
/reference/api/docker_remote_api_v1.9/#attach-to-a-container "POST /containers/(id)/attach").
|
||||
The WebSocket attach is unchanged. Note that attach calls on the
|
||||
previous API version didn't change. Stdout and stderr are merged.
|
||||
|
||||
## v1.5
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.5*](/reference/api/docker_remote_api_v1.5/)
|
||||
|
||||
### What's new
|
||||
|
||||
`POST /images/create`
|
||||
|
||||
**New!**
|
||||
You can now pass registry credentials (via an AuthConfig
|
||||
object) through the X-Registry-Auth header
|
||||
|
||||
`POST /images/(name)/push`
|
||||
|
||||
**New!**
|
||||
The AuthConfig object now needs to be passed through the
|
||||
X-Registry-Auth header
|
||||
|
||||
`GET /containers/json`
|
||||
|
||||
**New!**
|
||||
The format of the Ports entry has been changed to a list of
|
||||
dicts each containing PublicPort, PrivatePort and Type describing a
|
||||
port mapping.
|
||||
|
||||
## v1.4
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.4*](/reference/api/docker_remote_api_v1.4/)
|
||||
|
||||
### What's new
|
||||
|
||||
`POST /images/create`
|
||||
|
||||
**New!**
|
||||
When pulling a repo, all images are now downloaded in parallel.
|
||||
|
||||
`GET /containers/(id)/top`
|
||||
|
||||
**New!**
|
||||
You can now use ps args with docker top, like docker top
|
||||
<container_id> aux
|
||||
|
||||
`GET /events`
|
||||
|
||||
**New!**
|
||||
The image's name is now included in the events
|
||||
|
||||
## v1.3
|
||||
|
||||
docker v0.5.0
|
||||
[51f6c4a](https://github.com/docker/docker/commit/51f6c4a7372450d164c61e0054daf0223ddbd909)
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.3*](/reference/api/docker_remote_api_v1.3/)
|
||||
|
||||
### What's new
|
||||
|
||||
`GET /containers/(id)/top`
|
||||
|
||||
List the processes running inside a container.
|
||||
|
||||
`GET /events`
|
||||
|
||||
**New!**
|
||||
Monitor docker's events via streaming or via polling
|
||||
|
||||
Builder (/build):
|
||||
|
||||
- Simplify the upload of the build context
|
||||
- Simply stream a tarball instead of multipart upload with 4
|
||||
intermediary buffers
|
||||
- Simpler, less memory usage, less disk usage and faster
|
||||
|
||||
> **Warning**:
|
||||
> The /build improvements are not reverse-compatible. Pre 1.3 clients will
|
||||
> break on /build.
|
||||
|
||||
List containers (/containers/json):
|
||||
|
||||
- You can use size=1 to get the size of the containers
|
||||
|
||||
Start containers (/containers/<id>/start):
|
||||
|
||||
- You can now pass host-specific configuration (e.g., bind mounts) in
|
||||
the POST body for start calls
|
||||
|
||||
## v1.2
|
||||
|
||||
docker v0.4.2
|
||||
[2e7649b](https://github.com/docker/docker/commit/2e7649beda7c820793bd46766cbc2cfeace7b168)
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.2*](/reference/api/docker_remote_api_v1.2/)
|
||||
|
||||
### What's new
|
||||
|
||||
The auth configuration is now handled by the client.
|
||||
|
||||
The client should send its authConfig as POST on each call of
|
||||
`/images/(name)/push`
|
||||
|
||||
`GET /auth`
|
||||
|
||||
**Deprecated.**
|
||||
|
||||
`POST /auth`
|
||||
|
||||
Only checks the configuration but doesn't store it on the server
|
||||
|
||||
Deleting an image is now improved: it will only untag the image if it
has children and remove all the untagged parents if it has any.
|
||||
|
||||
`POST /images/<name>/delete`
|
||||
|
||||
Now returns a JSON structure with the list of images
|
||||
deleted/untagged.
|
||||
|
||||
## v1.1
|
||||
|
||||
docker v0.4.0
|
||||
[a8ae398](https://github.com/docker/docker/commit/a8ae398bf52e97148ee7bd0d5868de2e15bd297f)
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.1*](/reference/api/docker_remote_api_v1.1/)
|
||||
|
||||
### What's new
|
||||
|
||||
`POST /images/create`
|
||||
|
||||
`POST /images/(name)/insert`
|
||||
|
||||
`POST /images/(name)/push`
|
||||
|
||||
Uses a JSON stream instead of HTTP hijacking; it looks like this:
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
{"status":"Pushing..."}
|
||||
{"status":"Pushing", "progress":"1/? (n/a)"}
|
||||
{"error":"Invalid..."}
|
||||
...
|
||||
|
||||
## v1.0
|
||||
|
||||
docker v0.3.4
|
||||
[8d73740](https://github.com/docker/docker/commit/8d73740343778651c09160cde9661f5f387b36f4)
|
||||
|
||||
### Full documentation
|
||||
|
||||
[*Docker Remote API v1.0*](/reference/api/docker_remote_api_v1.0/)
|
||||
|
||||
### What's new
|
||||
|
||||
Initial version
|
||||
|
||||
@@ -1,761 +0,0 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "The Docker Hub and the Registry v1"
|
||||
description = "Documentation for docker Registry and Registry API"
|
||||
keywords = ["docker, registry, api, hub"]
|
||||
[menu.main]
|
||||
parent="smn_hub_ref"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# The Docker Hub and the Registry v1
|
||||
|
||||
## The three roles
|
||||
|
||||
There are three major components playing a role in the Docker ecosystem.
|
||||
|
||||
### Docker Hub
|
||||
|
||||
The Docker Hub is responsible for centralizing information about:
|
||||
|
||||
- User accounts
|
||||
- Checksums of the images
|
||||
- Public namespaces
|
||||
|
||||
The Docker Hub has different components:
|
||||
|
||||
- Web UI
|
||||
- Meta-data store (comments, stars, list public repositories)
|
||||
- Authentication service
|
||||
- Tokenization
|
||||
|
||||
The Docker Hub is authoritative for that information.
|
||||
|
||||
There is only one instance of the Docker Hub, run and
|
||||
managed by Docker Inc.
|
||||
|
||||
### Docker Registry 1.0
|
||||
|
||||
The 1.0 registry has the following characteristics:
|
||||
|
||||
- It stores the images and the graph for a set of repositories
|
||||
- It does not have user accounts data
|
||||
- It has no notion of user accounts or authorization
|
||||
- It delegates authentication and authorization to the Docker Hub Auth
|
||||
service using tokens
|
||||
- It supports different storage backends (S3, cloud files, local FS)
|
||||
- It doesn't have a local database
|
||||
- [Source Code](https://github.com/docker/docker-registry)
|
||||
|
||||
We expect that there will be multiple registries out there. To help you
|
||||
grasp the context, here are some examples of registries:
|
||||
|
||||
- **sponsor registry**: such a registry is provided by a third-party
|
||||
hosting infrastructure as a convenience for their customers and the
|
||||
Docker community as a whole. Its costs are supported by the third
|
||||
party, but the management and operation of the registry are
|
||||
supported by Docker, Inc. It features read/write access, and delegates
|
||||
authentication and authorization to the Docker Hub.
|
||||
- **mirror registry**: such a registry is provided by a third-party
|
||||
hosting infrastructure but is targeted at their customers only. Some
|
||||
mechanism (unspecified to date) ensures that public images are
|
||||
pulled from a sponsor registry to the mirror registry, to make sure
|
||||
that the customers of the third-party provider can `docker pull`
|
||||
those images locally.
|
||||
- **vendor registry**: such a registry is provided by a software
|
||||
vendor who wants to distribute docker images. It would be operated
|
||||
and managed by the vendor. Only users authorized by the vendor would
|
||||
be able to get write access. Some images would be public (accessible
|
||||
for anyone), others private (accessible only for authorized users).
|
||||
Authentication and authorization would be delegated to the Docker Hub.
|
||||
The goal of vendor registries is to let someone do `docker pull
|
||||
basho/riak1.3` and automatically push from the vendor registry
|
||||
(instead of a sponsor registry); i.e., vendors get all the convenience of a
|
||||
sponsor registry, while retaining control on the asset distribution.
|
||||
- **private registry**: such a registry is located behind a firewall,
|
||||
or protected by an additional security layer (HTTP authorization,
|
||||
SSL client-side certificates, IP address authorization...). The
|
||||
registry is operated by a private entity, outside of Docker's
|
||||
control. It can optionally delegate additional authorization to the
|
||||
Docker Hub, but it is not mandatory.
|
||||
|
||||
> **Note:** The latter implies that while HTTP is the protocol
|
||||
> of choice for a registry, multiple schemes are possible (and
|
||||
> in some cases, trivial):
|
||||
>
|
||||
> - HTTP with GET (and PUT for read-write registries);
|
||||
> - local mount point;
|
||||
> - remote docker addressed through SSH.
|
||||
|
||||
The latter would only require two new commands in Docker, e.g.,
|
||||
`registryget` and `registryput`,
|
||||
wrapping access to the local filesystem (and optionally doing
|
||||
consistency checks). Authentication and authorization are then delegated
|
||||
to SSH (e.g., with public keys).
|
||||
|
||||
### Docker
|
||||
|
||||
On top of being a runtime for LXC, Docker is the Registry client. It
|
||||
supports:
|
||||
|
||||
- Push / Pull on the registry
|
||||
- Client authentication on the Docker Hub
|
||||
|
||||
## Workflow
|
||||
|
||||
### Pull
|
||||
|
||||

|
||||
|
||||
1. Contact the Docker Hub to know where I should download “samalba/busybox”
|
||||
2. Docker Hub replies: a. `samalba/busybox` is on Registry A b. here are the
|
||||
checksums for `samalba/busybox` (for all layers) c. token
|
||||
3. Contact Registry A to receive the layers for `samalba/busybox` (all of
   them down to the base image). Registry A is authoritative for “samalba/busybox”
   but keeps a copy of all inherited layers and serves them all from the same
   location.
4. The registry contacts the Docker Hub to verify that the token/user is allowed to download images
5. The Docker Hub returns true/false, letting the registry know if it should proceed or error
   out
6. Get the payload for all layers
|
||||
|
||||
It's possible to run:
|
||||
|
||||
$ docker pull https://<registry>/repositories/samalba/busybox
|
||||
|
||||
In this case, Docker bypasses the Docker Hub. However the security is not
|
||||
guaranteed (in case Registry A is corrupted) because there won't be any
|
||||
checksum checks.
|
||||
|
||||
Currently, the registry redirects to S3 URLs for downloads; going forward, all
downloads need to be streamed through the registry. The Registry will
|
||||
then abstract the calls to S3 by a top-level class which implements
|
||||
sub-classes for S3 and local storage.
|
||||
|
||||
A token is only returned when the `X-Docker-Token`
header is sent with the request.
|
||||
|
||||
Basic Auth is required to pull private repos. Basic auth isn't required
|
||||
for pulling public repos, but if one is provided, it needs to be valid
|
||||
and for an active account.
|
||||
|
||||
**API (pulling repository foo/bar):**
|
||||
|
||||
1. (Docker -> Docker Hub) GET /v1/repositories/foo/bar/images:
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
|
||||
X-Docker-Token: true
|
||||
|
||||
**Action**:
|
||||
|
||||
(looking up the foo/bar in db and gets images and checksums
|
||||
for that repo (all if no tag is specified, if tag, only
|
||||
checksums for those tags) see part 4.4.1)
|
||||
|
||||
2. (Docker Hub -> Docker) HTTP 200 OK
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Token
|
||||
signature=123abc,repository=”foo/bar”,access=write
|
||||
X-Docker-Endpoints: registry.docker.io [,registry2.docker.io]
|
||||
|
||||
**Body**:
|
||||
|
||||
Jsonified checksums (see part 4.4.1)
|
||||
|
||||
3. (Docker -> Registry) GET /v1/repositories/foo/bar/tags/latest
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Token
|
||||
signature=123abc,repository=”foo/bar”,access=write
|
||||
|
||||
4. (Registry -> Docker Hub) GET /v1/repositories/foo/bar/images
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Token
|
||||
signature=123abc,repository=”foo/bar”,access=read
|
||||
|
||||
**Body**:
|
||||
|
||||
<ids and checksums in payload>
|
||||
|
||||
**Action**:
|
||||
|
||||
(Lookup token see if they have access to pull.)
|
||||
|
||||
If good:
|
||||
HTTP 200 OK Docker Hub will invalidate the token
|
||||
|
||||
If bad:
|
||||
HTTP 401 Unauthorized
|
||||
|
||||
5. (Docker -> Registry) GET /v1/images/928374982374/ancestry
|
||||
|
||||
**Action**:
|
||||
|
||||
(for each image id returned in the registry, fetch /json + /layer)
|
||||
|
||||
> **Note**:
|
||||
> If someone makes a second request, then we will always give a new token,
|
||||
> never reuse tokens.
|
||||
|
||||
### Push
|
||||
|
||||

|
||||
|
||||
1. Contact the Docker Hub to allocate the repository name “samalba/busybox”
|
||||
(authentication required with user credentials)
|
||||
2. If authentication works and namespace available, “samalba/busybox”
|
||||
is allocated and a temporary token is returned (namespace is marked
|
||||
as initialized in Docker Hub)
|
||||
3. Push the image on the registry (along with the token)
|
||||
4. Registry A contacts the Docker Hub to verify the token (the token must
   correspond to the repository name)
5. The Docker Hub validates the token. Registry A starts reading the stream
   pushed by docker and stores the repository (with its images)
6. docker contacts the Docker Hub to give the checksums for the uploaded images
||||
|
||||
> **Note:**
|
||||
> **It's possible not to use the Docker Hub at all!** In this case, a standalone
> version of the Registry is deployed to store and serve images. Those
> images are not authenticated and the security is not guaranteed.
|
||||
|
||||
> **Note:**
|
||||
> **Docker Hub can be replaced!** For a private Registry deployed, a custom
|
||||
> Docker Hub can be used to serve and validate token according to different
|
||||
> policies.
|
||||
|
||||
Docker computes the checksums and submits them to the Docker Hub at the end of
|
||||
the push. When a repository name does not have checksums on the Docker Hub,
|
||||
it means that the push is in progress (since checksums are submitted at
|
||||
the end).
|
||||
|
||||
**API (pushing repos foo/bar):**
|
||||
|
||||
1. (Docker -> Docker Hub) PUT /v1/repositories/foo/bar/
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Basic sdkjfskdjfhsdkjfh== X-Docker-Token:
|
||||
true
|
||||
|
||||
**Action**:
|
||||
|
||||
- in Docker Hub, we allocated a new repository, and set to
|
||||
initialized
|
||||
|
||||
**Body**:
|
||||
|
||||
(The body contains the list of images that are going to be
|
||||
pushed, with empty checksums. The checksums will be set at
|
||||
the end of the push):
|
||||
|
||||
[{“id”: “9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”}]
|
||||
|
||||
2. (Docker Hub -> Docker) 200 Created
|
||||
|
||||
**Headers**:
|
||||
|
||||
WWW-Authenticate: Token
|
||||
signature=123abc,repository=”foo/bar”,access=write
|
||||
X-Docker-Endpoints: registry.docker.io [, registry2.docker.io]
|
||||
|
||||
3. (Docker -> Registry) PUT /v1/images/98765432_parent/json
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Token
|
||||
signature=123abc,repository=”foo/bar”,access=write
|
||||
|
||||
4. (Registry->Docker Hub) GET /v1/repositories/foo/bar/images
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Token
|
||||
signature=123abc,repository=”foo/bar”,access=write
|
||||
|
||||
**Action**:
|
||||
|
||||
- Docker Hub:
|
||||
will invalidate the token.
|
||||
- Registry:
|
||||
grants a session (if token is approved) and fetches
|
||||
the images id
|
||||
|
||||
5. (Docker -> Registry) PUT /v1/images/98765432_parent/json
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Token
|
||||
signature=123abc,repository=”foo/bar”,access=write
|
||||
Cookie: (Cookie provided by the Registry)
|
||||
|
||||
6. (Docker -> Registry) PUT /v1/images/98765432/json
|
||||
|
||||
**Headers**:
|
||||
|
||||
Cookie: (Cookie provided by the Registry)
|
||||
|
||||
7. (Docker -> Registry) PUT /v1/images/98765432_parent/layer
|
||||
|
||||
**Headers**:
|
||||
|
||||
Cookie: (Cookie provided by the Registry)
|
||||
|
||||
8. (Docker -> Registry) PUT /v1/images/98765432/layer
|
||||
|
||||
**Headers**:
|
||||
|
||||
X-Docker-Checksum: sha256:436745873465fdjkhdfjkgh
|
||||
|
||||
9. (Docker -> Registry) PUT /v1/repositories/foo/bar/tags/latest
|
||||
|
||||
**Headers**:
|
||||
|
||||
Cookie: (Cookie provided by the Registry)
|
||||
|
||||
**Body**:
|
||||
|
||||
“98765432”
|
||||
|
||||
10. (Docker -> Docker Hub) PUT /v1/repositories/foo/bar/images
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Basic 123oislifjsldfj== X-Docker-Endpoints:
|
||||
registry1.docker.io (no validation on this right now)
|
||||
|
||||
**Body**:
|
||||
|
||||
(The image, id`s, tags and checksums)
|
||||
[{“id”:
|
||||
“9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f”,
|
||||
“checksum”:
|
||||
“b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087”}]
|
||||
|
||||
**Return**:
|
||||
|
||||
HTTP 204
|
||||
|
||||
> **Note:** If the push fails and the client needs to start again, there will
> already be a record in the Docker Hub for the namespace/name, but it will be
> marked as initialized. Should we allow it, or mark the name as already used? One edge
> case could be if someone pushes the same thing at the same time with two
> different shells.
|
||||
|
||||
If it's a retry on the Registry, Docker has a cookie (provided by the
|
||||
registry after token validation). So the Docker Hub won't have to provide a
|
||||
new token.
|
||||
|
||||
### Delete
|
||||
|
||||
If you need to delete something from the Docker Hub or registry, we need a
|
||||
nice clean way to do that. Here is the workflow.
|
||||
|
||||
1. Docker contacts the Docker Hub to request a delete of a repository
|
||||
`samalba/busybox` (authentication required with user credentials)
|
||||
2. If authentication works and repository is valid, `samalba/busybox`
|
||||
is marked as deleted and a temporary token is returned
|
||||
3. Send a delete request to the registry for the repository (along with
|
||||
the token)
|
||||
4. Registry A contacts the Docker Hub to verify the token (token must
|
||||
corresponds to the repository name)
|
||||
5. Docker Hub validates the token. Registry A deletes the repository and
|
||||
everything associated to it.
|
||||
6. Docker contacts the Docker Hub to let it know the repository was removed from the
|
||||
registry; the Docker Hub then removes all records from the database.
|
||||
|
||||
> **Note**:
|
||||
> The Docker client should present an "Are you sure?" prompt to confirm
|
||||
> the deletion before starting the process. Once it starts it can't be
|
||||
> undone.
|
||||
|
||||
**API (deleting repository foo/bar):**
|
||||
|
||||
1. (Docker -> Docker Hub) DELETE /v1/repositories/foo/bar/
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Basic sdkjfskdjfhsdkjfh== X-Docker-Token:
|
||||
true
|
||||
|
||||
**Action**:
|
||||
|
||||
- in Docker Hub, we make sure it is a valid repository, and set
|
||||
to deleted (logically)
|
||||
|
||||
**Body**:
|
||||
|
||||
Empty
|
||||
|
||||
2. (Docker Hub -> Docker) 202 Accepted
|
||||
|
||||
**Headers**:
|
||||
|
||||
WWW-Authenticate: Token
|
||||
signature=123abc,repository="foo/bar",access=delete
|
||||
X-Docker-Endpoints: registry.docker.io [, registry2.docker.io]
|
||||
# list of endpoints where this repo lives.
|
||||
|
||||
3. (Docker -> Registry) DELETE /v1/repositories/foo/bar/
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Token
|
||||
signature=123abc,repository="foo/bar",access=delete
|
||||
|
||||
4. (Registry->Docker Hub) PUT /v1/repositories/foo/bar/auth
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Token
|
||||
signature=123abc,repository="foo/bar",access=delete
|
||||
|
||||
**Action**:
|
||||
|
||||
- Docker Hub:
|
||||
will invalidate the token.
|
||||
- Registry:
|
||||
deletes the repository (if token is approved)
|
||||
|
||||
5. (Registry -> Docker) 200 OK
|
||||
|
||||
200 if success, 403 if forbidden, 400 if bad request, 404
|
||||
if the repository isn't found
|
||||
|
||||
6. (Docker -> Docker Hub) DELETE /v1/repositories/foo/bar/
|
||||
|
||||
**Headers**:
|
||||
|
||||
Authorization: Basic 123oislifjsldfj== X-Docker-Endpoints:
|
||||
registry-1.docker.io (no validation on this right now)
|
||||
|
||||
**Body**:
|
||||
|
||||
Empty
|
||||
|
||||
**Return**:
|
||||
|
||||
HTTP 200
|
||||
|
||||
## How to use the Registry in standalone mode
|
||||
|
||||
The Docker Hub has two main purposes (along with its fancy social features):
|
||||
|
||||
- Resolve short names (to avoid passing absolute URLs all the time):
|
||||
|
||||
username/projectname ->
|
||||
https://registry.docker.io/users/<username>/repositories/<projectname>/
|
||||
team/projectname ->
|
||||
https://registry.docker.io/team/<team>/repositories/<projectname>/
|
||||
|
||||
- Authenticate a user as a repository owner (for a centrally referenced
|
||||
repository)
|
||||
|
||||
### Without a Docker Hub
|
||||
|
||||
Using the Registry without the Docker Hub can be useful to store the images
|
||||
on a private network without having to rely on an external entity
|
||||
controlled by Docker Inc.
|
||||
|
||||
In this case, the registry will be launched in a special mode
|
||||
(-standalone? ne? -no-index?). In this mode, the only thing which changes is
|
||||
that the Registry will never contact the Docker Hub to verify a token. It will be
|
||||
the Registry owner's responsibility to authenticate the user who pushes
|
||||
(or even pulls) an image using any mechanism (HTTP auth, IP based,
|
||||
etc...).
|
||||
|
||||
In this scenario, the Registry is responsible for the security in case
|
||||
of data corruption since the checksums are not delivered by a trusted
|
||||
entity.
|
||||
|
||||
As hinted previously, a standalone registry can also be implemented by
|
||||
any HTTP server handling GET/PUT requests (or even only GET requests if
|
||||
no write access is necessary).
|
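For illustration, a client could read from such a standalone, read-only registry with plain HTTP requests against the image endpoints described later in this document (the hostname is hypothetical; the image id is shortened):

    # Fetch an image's JSON metadata, ancestry, and layer from a standalone
    # registry; no token and no Docker Hub are involved.
    curl -s http://myregistry.local:5000/v1/images/9e89cc6f/json
    curl -s http://myregistry.local:5000/v1/images/9e89cc6f/ancestry
    curl -s http://myregistry.local:5000/v1/images/9e89cc6f/layer -o layer.tar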
||||
|
||||
### With a Docker Hub
|
||||
|
||||
The Docker Hub data needed by the Registry are simple:
|
||||
|
||||
- Serve the checksums
|
||||
- Provide and authorize a Token
|
||||
|
||||
In the scenario of a Registry running on a private network with the need
|
||||
to centralize and authorize, it's easy to use a custom Docker Hub.
|
||||
|
||||
The only challenge will be to tell Docker to contact (and trust) this
|
||||
custom Docker Hub. Docker will be configurable at some point to use a
|
||||
specific Docker Hub; it'll be the private entity's responsibility (basically
|
||||
the organization that uses Docker in a private environment) to maintain
|
||||
the Docker Hub and Docker's configuration among its consumers.
|
||||
|
||||
## The API
|
||||
|
||||
The first version of the API is available here:
|
||||
[https://github.com/jpetazzo/docker/blob/acd51ecea8f5d3c02b00a08176171c59442df8b3/docs/images-repositories-push-pull.md](https://github.com/jpetazzo/docker/blob/acd51ecea8f5d3c02b00a08176171c59442df8b3/docs/images-repositories-push-pull.md)
|
||||
|
||||
### Images
|
||||
|
||||
The format returned in the images is not defined here (for layer and
|
||||
JSON), basically because the Registry stores exactly the same kind of
|
||||
information as Docker uses to manage them.
|
||||
|
||||
The format of ancestry is a line-separated list of image ids, in age
|
||||
order, i.e. the image's parent is on the last line, the parent of the
|
||||
parent on the next-to-last line, etc.; if the image has no parent, the
|
||||
file is empty.
|
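For example, following the ordering just described, a hypothetical ancestry file for an image whose direct parent is `b486531f...` (itself built from the base image `9e89cc6f...`) would contain:

    9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f
    b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087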
||||
|
||||
GET /v1/images/<image_id>/layer
|
||||
PUT /v1/images/<image_id>/layer
|
||||
GET /v1/images/<image_id>/json
|
||||
PUT /v1/images/<image_id>/json
|
||||
GET /v1/images/<image_id>/ancestry
|
||||
PUT /v1/images/<image_id>/ancestry
|
||||
|
||||
### Users
|
||||
|
||||
### Create a user (Docker Hub)
|
||||
|
||||
POST /v1/users:
|
||||
|
||||
**Body**:
|
||||
|
||||
{"email": "sam@docker.com",
|
||||
"password": "toto42", "username": "foobar"}
|
||||
|
||||
**Validation**:
|
||||
|
||||
- **username**: min 4 characters, max 30 characters, must match the
|
||||
regular expression [a-z0-9_].
|
||||
- **password**: min 5 characters
|
||||
|
||||
**Valid**:
|
||||
|
||||
return HTTP 201
|
||||
|
||||
Errors: HTTP 400 (we should create error codes for possible errors) -
|
||||
invalid JSON - missing field - wrong format (username, password, email,
|
||||
etc) - forbidden name - name already exists
|
||||
|
||||
> **Note**:
|
||||
> A user account will be valid only if the email has been validated (a
|
||||
> validation link is sent to the email address).
|
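As an illustration, the user-creation request above could be issued with `curl` (the hostname is an assumption; the body is the example given earlier):

    curl -i -X POST https://index.docker.io/v1/users \
      -H 'Content-Type: application/json' \
      -d '{"email": "sam@docker.com", "password": "toto42", "username": "foobar"}'
    # Expect HTTP 201 on success, HTTP 400 with an error description otherwise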
||||
|
||||
### Update a user (Docker Hub)
|
||||
|
||||
PUT /v1/users/<username>
|
||||
|
||||
**Body**:
|
||||
|
||||
{"password": "toto"}
|
||||
|
||||
> **Note**:
|
||||
> Users can also update their email address; if they do, they will need to
|
||||
> re-verify their new email address.
|
||||
|
||||
### Login (Docker Hub)
|
||||
|
||||
Does nothing but ask for user authentication. Can be used to
|
||||
validate credentials. HTTP Basic Auth for now; this may change in the future (see the example below).
|
||||
|
||||
GET /v1/users
|
||||
|
||||
**Return**:
|
||||
- Valid: HTTP 200
|
||||
- Invalid login: HTTP 401
|
||||
- Account inactive: HTTP 403 Account is not Active
|
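For example (credentials and hostname are illustrative):

    curl -i -u foobar:toto42 https://index.docker.io/v1/users
    # 200 OK if the credentials are valid, 401 or 403 otherwise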
||||
|
||||
### Tags (Registry)
|
||||
|
||||
The Registry does not know anything about users. Even though
|
||||
repositories are under usernames, it's just a namespace for the
|
||||
registry. This allows us to implement organizations or different namespaces
|
||||
per user later, without modifying the Registry's API.
|
||||
|
||||
The following naming restrictions apply:
|
||||
|
||||
- Namespaces must match the same regular expression as usernames (See
|
||||
the user creation rules above.)
|
||||
- Repository names must match the regular expression [a-zA-Z0-9-_.]
|
||||
|
||||
### Get all tags:
|
||||
|
||||
GET /v1/repositories/<namespace>/<repository_name>/tags
|
||||
|
||||
**Return**: HTTP 200
|
||||
[
|
||||
{
|
||||
"layer": "9e89cc6f",
|
||||
"name": "latest"
|
||||
},
|
||||
{
|
||||
"layer": "b486531f",
|
||||
"name": "0.1.1"
|
||||
}
|
||||
]
|
||||
|
||||
### Read the content of a tag (resolve the image id):
|
||||
|
||||
GET /v1/repositories/<namespace>/<repo_name>/tags/<tag>
|
||||
|
||||
**Return**:
|
||||
|
||||
"9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f"
|
||||
|
||||
### Delete a tag (Registry):
|
||||
|
||||
DELETE /v1/repositories/<namespace>/<repo_name>/tags/<tag>
|
||||
|
||||
### Images (Docker Hub)
|
||||
|
||||
For the Docker Hub to “resolve” the repository name to a Registry location,
|
||||
it uses the `X-Docker-Endpoints` header. In other terms, these requests
|
||||
always add an `X-Docker-Endpoints` header to indicate the
|
||||
location of the registry which hosts this repository.
|
||||
|
||||
### Get the images:
|
||||
|
||||
GET /v1/repositories/<namespace>/<repo_name>/images
|
||||
|
||||
**Return**: HTTP 200
|
||||
[{"id":
|
||||
"9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
|
||||
"checksum":
|
||||
"md5:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"}]
|
||||
|
||||
### Add/update the images:
|
||||
|
||||
You always add images, you never remove them.
|
||||
|
||||
PUT /v1/repositories/<namespace>/<repo_name>/images
|
||||
|
||||
**Body**:
|
||||
|
||||
[ {"id":
|
||||
"9e89cc6f0bc3c38722009fe6857087b486531f9a779a0c17e3ed29dae8f12c4f",
|
||||
"checksum":
|
||||
"sha256:b486531f9a779a0c17e3ed29dae8f12c4f9e89cc6f0bc3c38722009fe6857087"}
|
||||
]
|
||||
|
||||
**Return**:
|
||||
|
||||
204
|
||||
|
||||
### Repositories
|
||||
|
||||
### Remove a Repository (Registry)
|
||||
|
||||
DELETE /v1/repositories/<namespace>/<repo_name>
|
||||
|
||||
Return 200 OK
|
||||
|
||||
### Remove a Repository (Docker Hub)
|
||||
|
||||
This starts the delete process. See the delete workflow above for more details.
|
||||
|
||||
DELETE /v1/repositories/<namespace>/<repo_name>
|
||||
|
||||
Return 202 Accepted
|
||||
|
||||
## Chaining Registries
|
||||
|
||||
It's possible to chain Registry servers for several reasons:
|
||||
|
||||
- Load balancing
|
||||
- Delegate the next request to another server
|
||||
|
||||
When a Registry is a reference for a repository, it should host the
|
||||
entire image chain in order to avoid breaking the chain during the
|
||||
download.
|
||||
|
||||
The Docker Hub and Registry use this mechanism to redirect on one or the
|
||||
other.
|
||||
|
||||
Example with an image download:
|
||||
|
||||
On every request, a special header can be returned:
|
||||
|
||||
X-Docker-Endpoints: server1,server2
|
||||
|
||||
On the next request, the client will always pick a server from this
|
||||
list.
|
||||
|
||||
## Authentication and authorization
|
||||
|
||||
### On the Docker Hub
|
||||
|
||||
The Docker Hub supports both “Basic” and “Token” challenges. Usually when
|
||||
there is a `401 Unauthorized`, the Docker Hub replies
|
||||
this:
|
||||
|
||||
401 Unauthorized
|
||||
WWW-Authenticate: Basic realm="auth required",Token
|
||||
|
||||
You have 3 options:
|
||||
|
||||
1. Provide user credentials and ask for a token
|
||||
|
||||
**Header**:
|
||||
|
||||
Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
|
||||
X-Docker-Token: true
|
||||
|
||||
In this case, along with the 200 response, you'll get a new token
|
||||
(if user auth is ok). If authorization isn't correct you get a 401
|
||||
response. If account isn't active you will get a 403 response.
|
||||
|
||||
**Response**:
|
||||
|
||||
200 OK
|
||||
X-Docker-Token: Token
|
||||
signature=123abc,repository="foo/bar",access=read
|
||||
|
||||
|
||||
2. Provide user credentials only
|
||||
|
||||
**Header**:
|
||||
|
||||
Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
|
||||
|
||||
3. Provide Token
|
||||
|
||||
**Header**:
|
||||
|
||||
Authorization: Token
|
||||
signature=123abc,repository="foo/bar",access=read
|
||||
|
||||
### On the Registry
|
||||
|
||||
The Registry only supports the Token challenge:
|
||||
|
||||
401 Unauthorized
|
||||
WWW-Authenticate: Token
|
||||
|
||||
The only way is to provide a token on `401 Unauthorized`
|
||||
responses:
|
||||
|
||||
Authorization: Token signature=123abc,repository="foo/bar",access=read
|
||||
|
||||
Usually, the Registry provides a Cookie when a Token verification
|
||||
succeeds. Every time the Registry passes a Cookie, you have to pass
|
||||
that same cookie back:
|
||||
|
||||
200 OK
|
||||
Set-Cookie: session="wD/J7LqL5ctqw8haL10vgfhrb2Q=?foo=UydiYXInCnAxCi4=&timestamp=RjEzNjYzMTQ5NDcuNDc0NjQzCi4="; Path=/; HttpOnly
|
||||
|
||||
Next request:
|
||||
|
||||
GET /(...)
|
||||
Cookie: session="wD/J7LqL5ctqw8haL10vgfhrb2Q=?foo=UydiYXInCnAxCi4=&timestamp=RjEzNjYzMTQ5NDcuNDc0NjQzCi4="
|
||||
|
||||
## Document version
|
||||
|
||||
- 1.0 : May 6th 2013 : initial release
|
||||
- 1.1 : June 1st 2013 : Added Delete Repository and way to handle new
|
||||
source namespace.
|
||||
|
||||
@@ -114,12 +114,6 @@ images.
|
||||
|
||||
### Environment replacement
|
||||
|
||||
> **Note**: prior to 1.3, `Dockerfile` environment variables were handled
|
||||
> similarly, in that they would be replaced as described below. However, there
|
||||
> was no formal definition as to which instructions handled environment
|
||||
> replacement at the time. After 1.3 this behavior will be preserved and
|
||||
> canonical.
|
||||
|
||||
Environment variables (declared with [the `ENV` statement](#env)) can also be
|
||||
used in certain instructions as variables to be interpreted by the
|
||||
`Dockerfile`. Escapes are also handled for including variable-like syntax
|
||||
|
||||
@@ -16,7 +16,7 @@ or execute `docker help`:
|
||||
$ docker
|
||||
Usage: docker [OPTIONS] COMMAND [arg...]
|
||||
docker daemon [ --help | ... ]
|
||||
docker [ -h | --help | -v | --version ]
|
||||
docker [ --help | -v | --version ]
|
||||
|
||||
-H, --host=[]: The socket(s) to bind to in daemon mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.
|
||||
|
||||
|
||||
@@ -32,7 +32,7 @@ parent = "smn_cli"
|
||||
-G, --group="docker" Group for the unix socket
|
||||
-g, --graph="/var/lib/docker" Root of the Docker runtime
|
||||
-H, --host=[] Daemon socket(s) to connect to
|
||||
-h, --help=false Print usage
|
||||
--help=false Print usage
|
||||
--icc=true Enable inter-container communication
|
||||
--insecure-registry=[] Enable insecure registry communication
|
||||
--ip=0.0.0.0 Default IP when binding container ports
|
||||
|
||||
291
docs/security/trust/content_trust.md
Normal file
@@ -0,0 +1,291 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Content trust in Docker"
|
||||
description = "Enabling content trust in Docker"
|
||||
keywords = ["content, trust, security, docker, documentation"]
|
||||
[menu.main]
|
||||
parent= "smn_content_trust"
|
||||
weight=-1
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# Content trust in Docker
|
||||
|
||||
When transferring data among networked systems, *trust* is a central concern. In
|
||||
particular, when communicating over an untrusted medium such as the internet, it
|
||||
is critical to ensure the integrity and publisher of all the data a system
|
||||
operates on. You use Docker to push and pull images (data) to a registry. Content trust
|
||||
gives you the ability to both verify the integrity and the publisher of all the
|
||||
data received from a registry over any channel.
|
||||
|
||||
Content trust is currently only available for users of the public Docker Hub. It
|
||||
is currently not available for the Docker Trusted Registry or for private
|
||||
registries.
|
||||
|
||||
## Understand trust in Docker
|
||||
|
||||
Content trust allows operations with a remote Docker registry to enforce
|
||||
client-side signing and verification of image tags. Content trust provides the
|
||||
ability to use digital signatures for data sent to and received from remote
|
||||
Docker registries. These signatures allow client-side verification of the
|
||||
integrity and publisher of specific image tags.
|
||||
|
||||
Currently, content trust is disabled by default. You must enable it by setting
|
||||
the `DOCKER_CONTENT_TRUST` environment variable.
|
||||
|
||||
Once content trust is enabled, image publishers can sign their images. Image consumers can
|
||||
ensure that the images they use are signed. Publishers and consumers can be
|
||||
individuals alone or in organizations. Docker's content trust supports users and
|
||||
automated processes such as builds.
|
||||
|
||||
### Image tags and content trust
|
||||
|
||||
An individual image record has the following identifier:
|
||||
|
||||
```
|
||||
[REGISTRY_HOST[:REGISTRY_PORT]/]REPOSITORY[:TAG]
|
||||
```
|
||||
|
||||
A particular image `REPOSITORY` can have multiple tags. For example, `latest` and
|
||||
`3.1.2` are both tags on the `mongo` image. An image publisher can build an image
|
||||
and tag combination many times changing the image with each build.
|
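For example, all of the following are valid forms of this identifier (the private registry host and port are hypothetical):

```
mongo                                      # Docker Hub, implicit latest tag
mongo:3.1.2                                # Docker Hub, explicit tag
myregistry.example.com:5000/mongo:3.1.2    # private registry host and port
```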
||||
|
||||
Content trust is associated with the `TAG` portion of an image. Each image
|
||||
repository has a set of keys that image publishers use to sign an image tag.
|
||||
Image publishers have discretion on which tags they sign.
|
||||
|
||||
An image repository can contain an image with one tag that is signed and another
|
||||
tag that is not. For example, consider [the Mongo image
|
||||
repository](https://hub.docker.com/r/library/mongo/tags/). The `latest`
|
||||
tag could be unsigned while the `3.1.6` tag could be signed. It is the
|
||||
responsibility of the image publisher to decide if an image tag is signed or
|
||||
not. In this representation, some image tags are signed, others are not:
|
||||
|
||||

|
||||
|
||||
Publishers can choose to sign a specific tag or not. As a result, the content of
|
||||
an unsigned tag and that of a signed tag with the same name may not match. For
|
||||
example, a publisher can push a tagged image `someimage:latest` and sign it.
|
||||
Later, the same publisher can push an unsigned `someimage:latest` image. This second
|
||||
push replaces the last unsigned tag `latest` but does not affect the signed `latest` version.
|
||||
The ability to choose which tags they sign allows publishers to iterate over
|
||||
the unsigned version of an image before officially signing it.
|
||||
|
||||
Image consumers can enable content trust to ensure that images they use were
|
||||
signed. If a consumer enables content trust, they can only pull, run, or build
|
||||
with trusted images. Enabling content trust is like wearing a pair of
|
||||
rose-colored glasses. Consumers "see" only signed image tags and the less
|
||||
desirable, unsigned image tags are "invisible" to them.
|
||||
|
||||

|
||||
|
||||
To the consumer who has not enabled content trust, nothing about how they
|
||||
work with Docker images changes. Every image is visible regardless of whether it
|
||||
is signed or not.
|
||||
|
||||
|
||||
### Content trust operations and keys
|
||||
|
||||
When content trust is enabled, `docker` CLI commands that operate on tagged images must
|
||||
either have content signatures or explicit content hashes. The commands that
|
||||
operate with content trust are:
|
||||
|
||||
* `push`
|
||||
* `build`
|
||||
* `create`
|
||||
* `pull`
|
||||
* `run`
|
||||
|
||||
For example, with content trust enabled a `docker pull someimage:latest` only
|
||||
succeeds if `someimage:latest` is signed. However, an operation with an explicit
|
||||
content hash always succeeds as long as the hash exists:
|
||||
|
||||
```bash
|
||||
$ docker pull someimage@sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a
|
||||
```
|
||||
|
||||
Trust for an image tag is managed through the use of signing keys. Docker's content
|
||||
trust makes use of four different keys:
|
||||
|
||||
| Key | Description |
|
||||
|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| offline key | Root of content trust for an image tag. When content trust is enabled, you create the offline key once. |
|
||||
| target and snapshot | These two keys are known together as the "tagging" key. When content trust is enabled, you create this key when you add a new image repository. If you have the offline key, you can export the tagging key and allow other publishers to sign the image tags. |
|
||||
| timestamp | This key applies to a repository. It allows Docker repositories to have freshness security guarantees without requiring periodic content refreshes on the client's side. |
|
||||
|
||||
With the exception of the timestamp, all the keys are generated and stored locally
|
||||
client-side. The timestamp is safely generated and stored in a signing server that
|
||||
is deployed alongside the Docker registry. All keys are generated in a backend
|
||||
service that isn't directly exposed to the internet and are encrypted at rest.
|
||||
|
||||
The following image depicts the various signing keys and their relationships:
|
||||
|
||||

|
||||
|
||||
>**WARNING**: Loss of the offline key is **very difficult** to recover from.
|
||||
>Correcting this loss requires intervention from [Docker
|
||||
>Support](https://support.docker.com) to reset the repository state. This loss
|
||||
>also requires **manual intervention** from every consumer that used a signed
|
||||
>tag from this repository prior to the loss.
|
||||
|
||||
You should back up the offline key somewhere safe. Given that it is only required
|
||||
to create new repositories, it is a good idea to store it offline. Make sure you
|
||||
read the [Manage keys for content trust](/security/trust/trust_key_mng) information
|
||||
for details on creating, securing, and backing up your keys.
|
||||
|
||||
## Survey of typical content trust operations
|
||||
|
||||
This section surveys the typical trusted operations users perform with Docker
|
||||
images.
|
||||
|
||||
### Enable content trust
|
||||
|
||||
Enable content trust by setting the `DOCKER_CONTENT_TRUST` environment variable.
|
||||
Enabling per-shell is useful because you can have one shell configured for
|
||||
trusted operations and another terminal shell for untrusted operations. You can
|
||||
also add this declaration to your shell profile to have it always turned on by
|
||||
default.
|
||||
|
||||
To enable content trust in a `bash` shell enter the following command:
|
||||
|
||||
```bash
|
||||
export DOCKER_CONTENT_TRUST=1
|
||||
```
|
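To have content trust always enabled by default, as mentioned above, you could append the same line to your shell profile (the profile path is an assumption; adjust it for your shell):

```bash
echo 'export DOCKER_CONTENT_TRUST=1' >> ~/.bashrc
```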
||||
|
||||
Once set, each of the "tag" operations requires a key for the trusted tag. All of these
|
||||
commands also support the `--disable-content-trust` flag. This flag allows
|
||||
publishers to run individual operations on tagged images without content trust on an
|
||||
as-needed basis.
|
||||
|
||||
|
||||
### Push trusted content
|
||||
|
||||
To create signed content for a specific image tag, simply enable content trust and push
|
||||
a tagged image. If this is the first time you have pushed an image using content trust
|
||||
on your system, the session looks like this:
|
||||
|
||||
```bash
|
||||
$ docker push docker/trusttest:latest
|
||||
The push refers to a repository [docker.io/docker/trusttest] (len: 1)
|
||||
9a61b6b1315e: Image already exists
|
||||
902b87aaaec9: Image already exists
|
||||
latest: digest: sha256:d02adacee0ac7a5be140adb94fa1dae64f4e71a68696e7f8e7cbf9db8dd49418 size: 3220
|
||||
Signing and pushing trust metadata
|
||||
You are about to create a new offline signing key passphrase. This passphrase
|
||||
will be used to protect the most sensitive key in your signing system. Please
|
||||
choose a long, complex passphrase and be careful to keep the password and the
|
||||
key file itself secure and backed up. It is highly recommended that you use a
|
||||
password manager to generate the passphrase and keep it safe. There will be no
|
||||
way to recover this key. You can find the key in your config directory.
|
||||
Enter passphrase for new offline key with id a1d96fb:
|
||||
Repeat passphrase for new offline key with id a1d96fb:
|
||||
Enter passphrase for new tagging key with id docker.io/docker/trusttest (3a932f1):
|
||||
Repeat passphrase for new tagging key with id docker.io/docker/trusttest (3a932f1):
|
||||
Finished initializing "docker.io/docker/trusttest"
|
||||
```
|
||||
When you push your first tagged image with content trust enabled, the `docker` client
|
||||
recognizes this is your first push and:
|
||||
|
||||
- alerts you that it will create a new offline key
|
||||
- requests a passphrase for the key
|
||||
- generates an offline key in the `~/.docker/trust` directory
|
||||
- generates a tagging key in the `~/.docker/trust` directory
|
||||
|
||||
The passphrases you choose for both the offline key and your content key-pair should
|
||||
be randomly generated and stored in a *password manager*.
|
||||
|
||||
It is important to note that if you leave off the `latest` tag, content trust is skipped.
|
||||
This is true even if content trust is enabled and even if this is your first push.
|
||||
|
||||
```bash
|
||||
$ docker push docker/trusttest
|
||||
The push refers to a repository [docker.io/docker/trusttest] (len: 1)
|
||||
9a61b6b1315e: Image successfully pushed
|
||||
902b87aaaec9: Image successfully pushed
|
||||
latest: digest: sha256:a9a9c4402604b703bed1c847f6d85faac97686e48c579bd9c3b0fa6694a398fc size: 3220
|
||||
No tag specified, skipping trust metadata push
|
||||
```
|
||||
|
||||
It is skipped because, as the message states, you did not supply an image `TAG`
|
||||
value. In Docker content trust, signatures are associated with tags.
|
||||
|
||||
Once you have an offline key on your system, subsequent image repositories
|
||||
you create can use that same offline key:
|
||||
|
||||
```bash
|
||||
$ docker push docker.io/docker/seaside:latest
|
||||
The push refers to a repository [docker.io/docker/seaside] (len: 1)
|
||||
a9539b34a6ab: Image successfully pushed
|
||||
b3dbab3810fc: Image successfully pushed
|
||||
latest: digest: sha256:d2ba1e603661a59940bfad7072eba698b79a8b20ccbb4e3bfb6f9e367ea43939 size: 3346
|
||||
Signing and pushing trust metadata
|
||||
Enter key passphrase for offline key with id a1d96fb:
|
||||
Enter passphrase for new tagging key with id docker.io/docker/seaside (bb045e3):
|
||||
Repeat passphrase for new tagging key with id docker.io/docker/seaside (bb045e3):
|
||||
Finished initializing "docker.io/docker/seaside"
|
||||
```
|
||||
|
||||
The new image has its own tagging key and timestamp key. The `latest` tag is signed with both of
|
||||
these.
|
||||
|
||||
|
||||
### Pull image content
|
||||
|
||||
A common way to consume an image is to `pull` it. With content trust enabled, the Docker
|
||||
client only allows `docker pull` to retrieve signed images.
|
||||
|
||||
```
|
||||
$ docker pull docker/seaside
|
||||
Using default tag: latest
|
||||
Pull (1 of 1): docker/trusttest:latest@sha256:d149ab53f871
|
||||
...
|
||||
Tagging docker/trusttest@sha256:d149ab53f871 as docker/trusttest:latest
|
||||
```
|
||||
|
||||
The `seaside:latest` image is signed. In the following example, the command does not specify a tag, so the system uses
|
||||
the `latest` tag by default again and the `docker/cliffs:latest` tag is not signed.
|
||||
|
||||
```bash
|
||||
$ docker pull docker/cliffs
|
||||
Using default tag: latest
|
||||
no trust data available
|
||||
```
|
||||
|
||||
Because the tag `docker/cliffs:latest` is not trusted, the `pull` fails.
|
||||
|
||||
|
||||
### Disable content trust for specific operations
|
||||
|
||||
A user that wants to disable content trust for a particular operation can use the
|
||||
`--disable-content-trust` flag. **Warning: this flag disables content trust for
|
||||
this operation**. With this flag, Docker will ignore content trust and allow all
|
||||
operations to be done without verifying any signatures. If we wanted the
|
||||
following untrusted build to succeed, we could do:
|
||||
|
||||
```
|
||||
$ cat Dockerfile
|
||||
FROM docker/trusttest:notrust
|
||||
RUN echo
|
||||
$ docker build --disable-content-trust -t docker/trusttest:testing .
|
||||
Sending build context to Docker daemon 42.84 MB
|
||||
...
|
||||
Successfully built f21b872447dc
|
||||
```
|
||||
|
||||
The same is true for all the other commands, such as `pull` and `push`:
|
||||
|
||||
```
|
||||
$ docker pull --disable-content-trust docker/trusttest:untrusted
|
||||
...
|
||||
$ docker push --disable-content-trust docker/trusttest:untrusted
|
||||
...
|
||||
```
|
||||
|
||||
## Related information
|
||||
|
||||
* [Manage keys for content trust](/security/trust/trust_key_mng)
|
||||
* [Automation with content trust](/security/trust/trust_automation)
|
||||
* [Play in a content trust sandbox](/security/trust/trust_sandbox)
|
||||
|
||||
|
||||
|
||||
BIN
docs/security/trust/images/tag_signing.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 73 KiB |
1
docs/security/trust/images/trust_.gliffy
Normal file
File diff suppressed because one or more lines are too long
1
docs/security/trust/images/trust_components.gliffy
Normal file
File diff suppressed because one or more lines are too long
BIN
docs/security/trust/images/trust_components.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 121 KiB |
1
docs/security/trust/images/trust_signing.gliffy
Normal file
File diff suppressed because one or more lines are too long
BIN
docs/security/trust/images/trust_signing.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 70 KiB |
1
docs/security/trust/images/trust_view.gliffy
Normal file
File diff suppressed because one or more lines are too long
BIN
docs/security/trust/images/trust_view.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 58 KiB |
21
docs/security/trust/index.md
Normal file
@@ -0,0 +1,21 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Use trusted images"
|
||||
description = "Use trusted images"
|
||||
keywords = ["trust, security, docker, index"]
|
||||
[menu.main]
|
||||
identifier="smn_content_trust"
|
||||
parent= "mn_docker_hub"
|
||||
weight=4
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# Use trusted images
|
||||
|
||||
The following topics are available:
|
||||
|
||||
* [Content trust in Docker](/security/trust/content_trust)
|
||||
* [Manage keys for content trust](/security/trust/trust_key_mng)
|
||||
* [Automation with content trust](/security/trust/trust_automation)
|
||||
* [Play in a content trust sandbox](/security/trust/trust_sandbox)
|
||||
|
||||
79
docs/security/trust/trust_automation.md
Normal file
@@ -0,0 +1,79 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Automation with content trust"
|
||||
description = "Automating content push pulls with trust"
|
||||
keywords = ["trust, security, docker, documentation, automation"]
|
||||
[menu.main]
|
||||
parent= "smn_content_trust"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# Automation with content trust
|
||||
|
||||
Your automation systems that pull or build images can also work with trust. Any automation environment must set `DOCKER_CONTENT_TRUST` either manually or in a scripted fashion before processing images.
|
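As a minimal sketch, a bash-based CI script could enable content trust before any image operations (the image name reuses the example repository from this documentation):

```bash
#!/bin/bash
set -e
# Enable content trust for every docker pull/build/push that follows
export DOCKER_CONTENT_TRUST=1
docker pull docker/trusttest:latest
```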
||||
|
||||
## Bypass requests for passphrases
|
||||
|
||||
To allow tools to wrap docker and push trusted content, there are two
|
||||
environment variables that allow you to provide the passphrases without an
|
||||
expect script, or typing them in:
|
||||
|
||||
- `DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE`
|
||||
- `DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE`
|
||||
|
||||
Docker attempts to use the contents of these environment variables as passphrases
|
||||
for the keys. For example, an image publisher can export the repository `target`
|
||||
and `snapshot` passphrases:
|
||||
|
||||
```bash
|
||||
$ export DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE="u7pEQcGoebUHm6LHe6"
|
||||
$ export DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE="l7pEQcTKJjUHm6Lpe4"
|
||||
```
|
||||
|
||||
Then, when pushing a new tag the Docker client does not request these values but signs automatically:
|
||||
|
||||
```bash
|
||||
$ docker push docker/trusttest:latest
|
||||
The push refers to a repository [docker.io/docker/trusttest] (len: 1)
|
||||
a9539b34a6ab: Image already exists
|
||||
b3dbab3810fc: Image already exists
|
||||
latest: digest: sha256:d149ab53f871 size: 3355
|
||||
Signing and pushing trust metadata
|
||||
```
|
||||
|
||||
## Building with content trust
|
||||
|
||||
You can also build with content trust. Before running the `docker build` command, you should set the environment variable `DOCKER_CONTENT_TRUST` either manually or in a scripted fashion. Consider the simple Dockerfile below.
|
||||
|
||||
```Dockerfile
|
||||
FROM docker/trusttest:latest
|
||||
RUN echo
|
||||
```
|
||||
|
||||
The `FROM` tag is pulling a signed image. You cannot build an image that has a
|
||||
`FROM` that is not either present locally or signed. Given that content trust
|
||||
data exists for the tag `latest`, the following build should succeed:
|
||||
|
||||
```bash
|
||||
$ docker build -t docker/trusttest:testing .
|
||||
Using default tag: latest
|
||||
latest: Pulling from docker/trusttest
|
||||
|
||||
b3dbab3810fc: Pull complete
|
||||
a9539b34a6ab: Pull complete
|
||||
Digest: sha256:d149ab53f871
|
||||
```
|
||||
|
||||
If content trust is enabled, building from a Dockerfile that relies on a tag without trust data causes the build command to fail:
|
||||
|
||||
```bash
|
||||
$ docker build -t docker/trusttest:testing .
|
||||
unable to process Dockerfile: No trust data for notrust
|
||||
```
|
||||
|
||||
## Related information
|
||||
|
||||
* [Content trust in Docker](/security/trust/content_trust)
|
||||
* [Manage keys for content trust](/security/trust/trust_key_mng)
|
||||
* [Play in a content trust sandbox](/security/trust/trust_sandbox)
|
||||
|
||||
74
docs/security/trust/trust_key_mng.md
Normal file
@@ -0,0 +1,74 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Manage keys for content trust"
|
||||
description = "Manage keys for content trust"
|
||||
keywords = ["trust, security, root, keys, repository"]
|
||||
[menu.main]
|
||||
parent= "smn_content_trust"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# Manage keys for content trust
|
||||
|
||||
Trust for an image tag is managed through the use of keys. Docker's content
|
||||
trust makes use of four different keys:
|
||||
|
||||
| Key | Description |
|
||||
|---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| offline key | Root of content trust for an image tag. When content trust is enabled, you create the offline key once. |
|
||||
| target and snapshot | These two keys are known together as the "tagging" key. When content trust is enabled, you create this key when you add a new image repository. If you have the offline key, you can export the tagging key and allow other publishers to sign the image tags. |
|
||||
| timestamp | This key applies to a repository. It allows Docker repositories to have freshness security guarantees without requiring periodic content refreshes on the client's side. |
|
||||
|
||||
With the exception of the timestamp, all the keys are generated and stored locally
|
||||
client-side. The timestamp is safely generated and stored in a signing server that
|
||||
is deployed alongside the Docker registry. All keys are generated in a backend
|
||||
service that isn't directly exposed to the internet and are encrypted at rest.
|
||||
|
||||
## Choosing a passphrase
|
||||
|
||||
The passphrases you choose for both the offline key and your tagging key should
|
||||
be randomly generated and stored in a password manager. Having the tagging key
|
||||
allows users to sign image tags on a repository. Passphrases are used to encrypt
|
||||
your keys at rest and ensure that a lost laptop or an unintended backup doesn't
|
||||
put the private key material at risk.
|
||||
|
||||
## Back up your keys
|
||||
|
||||
All the Docker trust keys are stored encrypted using the passphrase you provide
|
||||
on creation. Even so, you should still take care of the location where you back them up.
|
||||
Good practice is to create two encrypted USB keys.
|
||||
|
||||
It is very important that you back up your keys to a safe, secure location. Loss
|
||||
of the tagging key is recoverable; loss of the offline key is not.
|
||||
|
||||
The Docker client stores the keys in the `~/.docker/trust/private` directory.
|
||||
Before backing them up, you should `tar` them into an archive:
|
||||
|
||||
```bash
|
||||
$ tar -zcvf private_keys_backup.tar.gz ~/.docker/trust/private
|
||||
$ chmod 600 private_keys_backup.tar.gz
|
||||
```
|
||||
|
||||
## Lost keys
|
||||
|
||||
If you lose your keys, you lose the ability to sign trusted content for
|
||||
your repositories. If you lose a key, contact [Docker
|
||||
Support](https://support.docker.com) (support@docker.com) to reset the repository
|
||||
state.
|
||||
|
||||
This loss also requires **manual intervention** from every consumer that pulled
|
||||
the tagged image prior to the loss. Image consumers would get an error for
|
||||
content that they already downloaded:
|
||||
|
||||
```
|
||||
could not validate the path to a trusted root: failed to validate data with current trusted certificates
|
||||
```
|
||||
|
||||
To correct this, they need to download a new image tag that is signed with
|
||||
the new key.
|
||||
|
||||
## Related information
|
||||
|
||||
* [Content trust in Docker](/security/trust/content_trust)
|
||||
* [Automation with content trust](/security/trust/trust_automation)
|
||||
* [Play in a content trust sandbox](/security/trust/trust_sandbox)
|
||||
331
docs/security/trust/trust_sandbox.md
Normal file
@@ -0,0 +1,331 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
title = "Play in a content trust sandbox"
|
||||
description = "Play in a trust sandbox"
|
||||
keywords = ["trust, security, root, keys, repository, sandbox"]
|
||||
[menu.main]
|
||||
parent= "smn_content_trust"
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# Play in a content trust sandbox
|
||||
|
||||
This page explains how to set up and use a sandbox for experimenting with trust.
|
||||
The sandbox allows you to configure and try trust operations locally without
|
||||
impacting your production images.
|
||||
|
||||
Before working through this sandbox, you should have read through the [trust
|
||||
overview](content_trust.md).
|
||||
|
||||
### Prerequisites
|
||||
|
||||
These instructions assume you are running in Linux or Mac OS X. You can run
|
||||
this sandbox on a local machine or on a virtual machine. You will need to
|
||||
have `sudo` privileges on your local machine or in the VM.
|
||||
|
||||
This sandbox requires you to install two Docker tools: Docker Engine and Docker
|
||||
Compose. To install the Docker Engine, choose from the [list of supported
|
||||
platforms]({{< relref "installation.md" >}}). To install Docker Compose, see the
|
||||
[detailed instructions here]({{< relref "compose/install" >}}).
|
||||
|
||||
Finally, you'll need to have `git` installed on your local system or VM.
|
||||
|
||||
## What is in the sandbox?
|
||||
|
||||
If you are just using trust out-of-the-box you only need your Docker Engine
|
||||
client and access to Docker's own public hub. The sandbox mimics a
|
||||
production trust environment, and requires these additional components:
|
||||
|
||||
| Container | Description |
|
||||
|-----------------|---------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| notarysandbox | A container with the latest version of Docker Engine and with some preconfigured certificates. This is your sandbox where you can use the `docker` client to test trust operations. |
|
||||
| Registry server | A local registry service. |
|
||||
| Notary server | The service that does all the heavy-lifting of managing trust. |
|
||||
| Notary signer | A service that ensures that your keys are secure. |
|
||||
| MySQL | The database where all of the trust information will be stored |
|
||||
|
||||
The sandbox uses the Docker daemon on your local system. Within the `notarysandbox`
|
||||
you interact with a local registry rather than the public Docker Hub. This means
|
||||
your everyday image repositories are not used. They are protected while you play.
|
||||
|
||||
When you play in the sandbox, you'll also create root and tagging keys. The
|
||||
sandbox is configured to store all the keys and files inside the `notarysandbox`
|
||||
container. Since the keys you create in the sandbox are for play only,
|
||||
destroying the container destroys them as well.
|
||||
|
||||
|
||||
## Build the sandbox
|
||||
|
||||
In this section, you build the Docker components for your trust sandbox. If you
|
||||
work exclusively with the Docker Hub, you would not need these components.
|
||||
They are built into the Docker Hub for you. For the sandbox, however, you must
|
||||
build your own entire mock production environment and registry.
|
||||
|
||||
### Configure /etc/hosts
|
||||
|
||||
The sandbox's `notaryserver` and `sandboxregistry` run on your local server. The
|
||||
client inside the `notarysandbox` container connects to them over your network.
|
||||
So, you'll need an entry for both the servers in your local `/etc/hosts` file.
|
||||
|
||||
1. Add an entry for the `notaryserver` to `/etc/hosts`.
|
||||
|
||||
$ sudo sh -c 'echo "127.0.0.1 notaryserver" >> /etc/hosts'
|
||||
|
||||
2. Add an entry for the `sandboxregistry` to `/etc/hosts`.
|
||||
|
||||
$ sudo sh -c 'echo "127.0.0.1 sandboxregistry" >> /etc/hosts'
|
||||
|
||||
|
||||
### Build the notarytest image
|
||||
|
||||
1. Create a `notarysandbox` directory on your system.
|
||||
|
||||
$ mkdir notarysandbox
|
||||
|
||||
2. Change into your `notarysandbox` directory.
|
||||
|
||||
$ cd notarysandbox
|
||||
|
||||
3. Create a `notarytest` directory then change into that.
|
||||
|
||||
$ mkdir notarytest
|
||||
$ cd notarytest
|
||||
|
||||
4. Create a file called `Dockerfile` with your favorite editor.
|
||||
|
||||
5. Add the following to the new file.
|
||||
|
||||
FROM debian:jessie
|
||||
|
||||
ADD https://master.dockerproject.org/linux/amd64/docker /usr/bin/docker
|
||||
RUN chmod +x /usr/bin/docker \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y \
|
||||
tree \
|
||||
vim \
|
||||
git \
|
||||
ca-certificates \
|
||||
--no-install-recommends
|
||||
|
||||
WORKDIR /root
|
||||
RUN git clone -b trust-sandbox https://github.com/docker/notary.git
|
||||
RUN cp /root/notary/fixtures/root-ca.crt /usr/local/share/ca-certificates/root-ca.crt
|
||||
RUN update-ca-certificates
|
||||
|
||||
ENTRYPOINT ["bash"]
|
||||
|
||||
6. Save and close the file.
|
||||
|
||||
7. Build the testing container.
|
||||
|
||||
$ docker build -t notarysandbox .
|
||||
Sending build context to Docker daemon 2.048 kB
|
||||
Step 0 : FROM debian:jessie
|
||||
...
|
||||
Successfully built 5683f17e9d72
|
||||
|
||||
|
||||
### Build and start up the trust servers
|
||||
|
||||
In this step, you get the source code for your notary and registry services.
|
||||
Then, you'll use Docker Compose to build and start them on your local system.
|
||||
|
||||
1. Change back to the root of your `notarysandbox` directory.
|
||||
|
||||
$ cd notarysandbox
|
||||
|
||||
2. Clone the `notary` project.
|
||||
|
||||
$ git clone -b trust-sandbox https://github.com/docker/notary.git
|
||||
|
||||
3. Clone the `distribution` project.
|
||||
|
||||
$ git clone https://github.com/docker/distribution.git
|
||||
|
||||
4. Change to the `notary` project directory.
|
||||
|
||||
$ cd notary
|
||||
|
||||
The directory contains a `docker-compose` file that you'll use to run a
|
||||
notary server together with a notary signer and the corresponding MySQL
|
||||
databases. The databases store the trust information for an image.
|
||||
|
||||
5. Build the server images.
|
||||
|
||||
$ docker-compose build
|
||||
|
||||
The first time you run this, the build takes some time.
|
||||
|
||||
6. Run the server containers on your local system.
|
||||
|
||||
$ docker-compose up -d
|
||||
|
||||
Once the trust services are up, you'll set up a local version of the Docker
|
||||
Registry v2.
|
||||
|
||||
7. Change to the `notarysandbox/distribution` directory.
|
||||
|
||||
8. Build the `sandboxregistry` server.
|
||||
|
||||
$ docker build -t sandboxregistry .
|
||||
|
||||
9. Start the `sandboxregistry` server running.
|
||||
|
||||
$ docker run -p 5000:5000 --name sandboxregistry sandboxregistry &
|
||||
|
||||
## Playing in the sandbox
|
||||
|
||||
Now that everything is set up, you can go into your `notarysandbox` container and
|
||||
start testing Docker content trust.
|
||||
|
||||
|
||||
### Start the notarysandbox container
|
||||
|
||||
In this procedure, you start the `notarysandbox` and link it to the running
|
||||
`notary_notaryserver_1` and `sandboxregistry` containers. The links allow
|
||||
communication among the containers.
|
||||
|
||||
```
|
||||
$ docker run -it -v /var/run/docker.sock:/var/run/docker.sock --link notary_notaryserver_1:notaryserver --link sandboxregistry:sandboxregistry notarysandbox
|
||||
root@0710762bb59a:/#
|
||||
```
|
||||
|
||||
Mounting the `docker.sock` gives the `notarysandbox` access to the `docker`
|
||||
daemon on your host, while storing all the keys and files inside the sandbox
|
||||
container. When you destroy the container, you destroy the "play" keys.
|
||||
|
||||
### Test some trust operations
|
||||
|
||||
Now, you'll pull some images.
|
||||
|
||||
1. Download a `docker` image to test with.
|
||||
|
||||
# docker pull docker/trusttest
|
||||
docker pull docker/trusttest
|
||||
Using default tag: latest
|
||||
latest: Pulling from docker/trusttest
|
||||
|
||||
b3dbab3810fc: Pull complete
|
||||
a9539b34a6ab: Pull complete
|
||||
Digest: sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a
|
||||
Status: Downloaded newer image for docker/trusttest:latest
|
||||
|
||||
2. Tag it to be pushed to our sandbox registry:
|
||||
|
||||
# docker tag docker/trusttest sandboxregistry:5000/test/trusttest:latest
|
||||
|
||||
3. Enable content trust.
|
||||
|
||||
# export DOCKER_CONTENT_TRUST=1
|
||||
|
||||
4. Identify the trust server.
|
||||
|
||||
# export DOCKER_CONTENT_TRUST_SERVER=https://notaryserver:4443
|
||||
|
||||
This step is only necessary because the sandbox uses its own server.
|
||||
Normally, if you are using the public Docker Hub, this step isn't needed.
|
||||
|
||||
5. Pull the test image.
|
||||
|
||||
# docker pull sandboxregistry:5000/test/trusttest
|
||||
Using default tag: latest
|
||||
no trust data available
|
||||
|
||||
You see an error, because this content doesn't exist on the `sandboxregistry` yet.
|
||||
|
||||
6. Push the trusted image.
|
||||
|
||||
# docker push sandboxregistry:5000/test/trusttest:latest
|
||||
The push refers to a repository [sandboxregistry:5000/test/trusttest] (len: 1)
|
||||
a9539b34a6ab: Image successfully pushed
|
||||
b3dbab3810fc: Image successfully pushed
|
||||
latest: digest: sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c size: 3348
|
||||
Signing and pushing trust metadata
|
||||
You are about to create a new root signing key passphrase. This passphrase
|
||||
will be used to protect the most sensitive key in your signing system. Please
|
||||
choose a long, complex passphrase and be careful to keep the password and the
|
||||
key file itself secure and backed up. It is highly recommended that you use a
|
||||
password manager to generate the passphrase and keep it safe. There will be no
|
||||
way to recover this key. You can find the key in your config directory.
|
||||
Enter passphrase for new offline key with id 8c69e04:
|
||||
Repeat passphrase for new offline key with id 8c69e04:
|
||||
Enter passphrase for new tagging key with id sandboxregistry:5000/test/trusttest (93c362a):
|
||||
Repeat passphrase for new tagging key with id sandboxregistry:5000/test/trusttest (93c362a):
|
||||
Finished initializing "sandboxregistry:5000/test/trusttest"
|
||||
latest: digest: sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a size: 3355
|
||||
Signing and pushing trust metadata
|
||||
|
||||
7. Try pulling the image you just pushed:
|
||||
|
||||
# docker pull sandboxregistry:5000/test/trusttest
|
||||
Using default tag: latest
|
||||
Pull (1 of 1): sandboxregistry:5000/test/trusttest:latest@sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c
|
||||
sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c: Pulling from test/trusttest
|
||||
b3dbab3810fc: Already exists
|
||||
a9539b34a6ab: Already exists
|
||||
Digest: sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c
|
||||
Status: Downloaded newer image for sandboxregistry:5000/test/trusttest@sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c
|
||||
Tagging sandboxregistry:5000/test/trusttest@sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c as sandboxregistry:5000/test/trusttest:latest
|
||||
|
||||
|
||||
### Test with malicious images
|
||||
|
||||
What happens when data is corrupted and you try to pull it when trust is
|
||||
enabled? In this section, you go into the `sandboxregistry` and tamper with some
|
||||
data. Then, you try to pull it.
|
||||
|
||||
1. Leave the sandbox container running.
|
||||
|
||||
2. Open a new bash terminal from your host into the `sandboxregistry`.
|
||||
|
||||
$ docker exec -it sandboxregistry bash
|
||||
296db6068327#
|
||||
|
||||
3. Change into the registry storage.
|
||||
|
||||
You'll need to provide the `sha` you received when you pushed the image.
|
||||
|
||||
# cd /var/lib/registry/docker/registry/v2/blobs/sha256/aa/aac0c133338db2b18ff054943cee3267fe50c75cdee969aed88b1992539ed042
|
||||
|
||||
4. Add malicious data to one of the trusttest layers:
|
||||
|
||||
# echo "Malicious data" > data
|
||||
|
||||
5. Go back to your sandbox terminal.
|
||||
|
||||
6. List the trusttest image.
|
||||
|
||||
# docker images | grep trusttest
|
||||
docker/trusttest latest a9539b34a6ab 7 weeks ago 5.025 MB
|
||||
sandboxregistry:5000/test/trusttest latest a9539b34a6ab 7 weeks ago 5.025 MB
|
||||
sandboxregistry:5000/test/trusttest <none> a9539b34a6ab 7 weeks ago 5.025 MB
|
||||
|
||||
7. Remove the `trusttest:latest` image.
|
||||
|
||||
# docker rmi -f a9539b34a6ab
|
||||
Untagged: docker/trusttest:latest
|
||||
Untagged: sandboxregistry:5000/test/trusttest:latest
|
||||
Untagged: sandboxregistry:5000/test/trusttest@sha256:1d871dcb16805f0604f10d31260e79c22070b35abc71a3d1e7ee54f1042c8c7c
|
||||
Deleted: a9539b34a6aba01d3942605dfe09ab821cd66abf3cf07755b0681f25ad81f675
|
||||
Deleted: b3dbab3810fc299c21f0894d39a7952b363f14520c2f3d13443c669b63b6aa20
|
||||
|
||||
8. Pull the image again.
|
||||
|
||||
# docker pull sandboxregistry:5000/test/trusttest
|
||||
Using default tag: latest
|
||||
...
|
||||
b3dbab3810fc: Verifying Checksum
|
||||
a9539b34a6ab: Pulling fs layer
|
||||
filesystem layer verification failed for digest sha256:aac0c133338db2b18ff054943cee3267fe50c75cdee969aed88b1992539ed042
|
||||
|
||||
You'll see that the pull did not complete because the trust system was
|
||||
unable to verify the image.
|
||||
|
||||
## More play in the sandbox
|
||||
|
||||
Now that you have a full Docker content trust sandbox on your local system,
|
||||
feel free to play with it and see how it behaves. If you find any security
|
||||
issues with Docker, feel free to send us an email at <security@docker.com>.
|
||||
|
||||
|
||||
|
||||
@@ -74,16 +74,21 @@ The output will provide details on the container configurations including the
|
||||
volumes. The output should look something similar to the following:
|
||||
|
||||
...
|
||||
"Volumes": {
|
||||
"/webapp": "/var/lib/docker/volumes/fac362...80535"
|
||||
},
|
||||
"VolumesRW": {
|
||||
"/webapp": true
|
||||
}
|
||||
"Mounts": [
|
||||
{
|
||||
"Name": "fac362...80535",
|
||||
"Source": "/var/lib/docker/volumes/fac362...80535/_data",
|
||||
"Destination": "/webapp",
|
||||
"Driver": "local",
|
||||
"Mode": "",
|
||||
"RW": true
|
||||
}
|
||||
]
|
||||
...
|
||||
|
||||
You will notice in the above 'Volumes' is specifying the location on the host and
|
||||
'VolumesRW' is specifying that the volume is read/write.
|
||||
You will notice in the above 'Source' is specifying the location on the host and
|
||||
'Destination' is specifying the volume location inside the container. `RW` shows
|
||||
if the volume is read/write.
|
||||
|
||||
### Mount a host directory as a data volume
|
||||
|
||||
|
||||
53
docs/userguide/image_management.md
Normal file
@@ -0,0 +1,53 @@
|
||||
<!--[metadata]>
|
||||
+++
|
||||
alias = [ "/reference/api/hub_registry_spec/"]
|
||||
title = "Image management"
|
||||
description = "Documentation for docker Registry and Registry API"
|
||||
keywords = ["docker, registry, api, hub"]
|
||||
[menu.main]
|
||||
parent="mn_docker_hub"
|
||||
weight=-1
|
||||
+++
|
||||
<![end-metadata]-->
|
||||
|
||||
# Image management
|
||||
|
||||
The Docker Engine provides a client which you can use to create images on the command line or through a build process. You can run these images in a container or publish them for others to use. Storing the images you create, searching for images you might want, or publishing images others might use are all elements of image management.
|
||||
|
||||
This section provides an overview of the major features and products Docker provides for image management.
|
||||
|
||||
|
||||
## Docker Hub
|
||||
|
||||
The [Docker Hub](https://docs.docker.com/docker-hub/) is responsible for centralizing information about user accounts, images, and public namespaces. It has different components:
|
||||
|
||||
- Web UI
|
||||
- Meta-data store (comments, stars, list public repositories)
|
||||
- Authentication service
|
||||
- Tokenization
|
||||
|
||||
There is only one instance of the Docker Hub, run and managed by Docker Inc. This public Hub is useful for most individuals and smaller companies.
|
||||
|
||||
## Docker Registry and the Docker Trusted Registry
|
||||
|
||||
The Docker Registry is a component of Docker's ecosystem. A registry is a
|
||||
storage and content delivery system, holding named Docker images, available in
|
||||
different tagged versions. For example, the image `distribution/registry`, with
|
||||
tags `2.0` and `latest`. Users interact with a registry by using docker push and
|
||||
pull commands. For example, `docker pull myregistry.com/stevvooe/batman:voice`.
|
||||
|
||||
The Docker Hub has its own registry which, like the Hub itself, is run and managed by Docker. There are other ways to obtain a registry. You can purchase the [Docker Trusted Registry](https://docs.docker.com/docker-trusted-registry) product to run on your company's network. Alternatively, you can use the Docker Registry component to build a private registry. For information about using a registry, see the overview for the [Docker Registry](https://docs.docker.com/registry).
|
||||
|
||||
|
||||
## Content Trust
|
||||
|
||||
When transferring data among networked systems, *trust* is a central concern. In
|
||||
particular, when communicating over an untrusted medium such as the internet, it
|
||||
is critical to ensure the integrity and publisher of all the data a system
|
||||
operates on. You use Docker to push and pull images (data) to a registry.
|
||||
Content trust gives you the ability to both verify the integrity and the
|
||||
publisher of all the data received from a registry over any channel.
|
||||
|
||||
[Content trust](/security/trust) is currently only available for users of the
|
||||
public Docker Hub. It is currently not available for the Docker Trusted Registry
|
||||
or for private registries.
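
As a sketch (assuming a Docker 1.8 or later client and a repository that already has trust data; `myname/trusted-image` is a placeholder), content trust is switched on per shell with an environment variable, after which pushes sign the tag and pulls refuse unsigned content:

    $ export DOCKER_CONTENT_TRUST=1
    $ docker push myname/trusted-image:latest   # prompts for signing key passphrases and signs the tag
    $ docker pull myname/trusted-image:latest   # fails if no trust data exists for the tag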
@@ -13,18 +13,18 @@ please feel free to provide any feedback on these features you wish.
|
||||
|
||||
Unlike the regular Docker binary, the experimental channel is built and updated nightly on TO.BE.ANNOUNCED. From one day to the next, new features may appear, while existing experimental features may be refined or entirely removed.
|
||||
|
||||
1. Verify that you have `wget` installed.
|
||||
1. Verify that you have `curl` installed.
|
||||
|
||||
$ which wget
|
||||
$ which curl
|
||||
|
||||
If `wget` isn't installed, install it after updating your manager:
|
||||
If `curl` isn't installed, install it after updating your manager:
|
||||
|
||||
$ sudo apt-get update
|
||||
$ sudo apt-get install wget
|
||||
$ sudo apt-get install curl
|
||||
|
||||
2. Get the latest Docker package.
|
||||
|
||||
$ wget -qO- https://experimental.docker.com/ | sh
|
||||
$ curl -sSL https://experimental.docker.com/ | sh
|
||||
|
||||
The system prompts you for your `sudo` password. Then, it downloads and
|
||||
installs Docker and its dependencies.
|
||||
@@ -34,7 +34,7 @@ Unlike the regular Docker binary, the experimental channels is built and updated
|
||||
>command fails for the Docker repo during installation. To work around this,
|
||||
>add the key directly using the following:
|
||||
>
|
||||
> $ wget -qO- https://experimental.docker.com/gpg | sudo apt-key add -
|
||||
> $ curl -sSL https://experimental.docker.com/gpg | sudo apt-key add -
|
||||
|
||||
3. Verify `docker` is installed correctly.
|
||||
|
||||
|
||||
@@ -77,7 +77,7 @@ func (p *v2Puller) pullV2Repository(tag string) (err error) {
|
||||
if err != nil {
|
||||
if c != nil {
|
||||
// Another pull of the same repository is already taking place; just wait for it to finish
|
||||
p.sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", p.repoInfo.CanonicalName)
|
||||
p.config.OutStream.Write(p.sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", p.repoInfo.CanonicalName))
|
||||
<-c
|
||||
return nil
|
||||
}
|
||||
@@ -140,9 +140,9 @@ func (p *v2Puller) download(di *downloadInfo) {
|
||||
return
|
||||
}
|
||||
|
||||
blobs := p.repo.Blobs(nil)
|
||||
blobs := p.repo.Blobs(context.Background())
|
||||
|
||||
desc, err := blobs.Stat(nil, di.digest)
|
||||
desc, err := blobs.Stat(context.Background(), di.digest)
|
||||
if err != nil {
|
||||
logrus.Debugf("Error statting layer: %v", err)
|
||||
di.err <- err
|
||||
@@ -150,7 +150,7 @@ func (p *v2Puller) download(di *downloadInfo) {
|
||||
}
|
||||
di.size = desc.Size
|
||||
|
||||
layerDownload, err := blobs.Open(nil, di.digest)
|
||||
layerDownload, err := blobs.Open(context.Background(), di.digest)
|
||||
if err != nil {
|
||||
logrus.Debugf("Error fetching layer: %v", err)
|
||||
di.err <- err
|
||||
@@ -223,6 +223,9 @@ func (p *v2Puller) pullV2Tag(tag, taggedName string) (verified bool, err error)
|
||||
go func() {
|
||||
if _, err := io.Copy(out, pipeReader); err != nil {
|
||||
logrus.Errorf("error copying from layer download progress reader: %s", err)
|
||||
if err := pipeReader.CloseWithError(err); err != nil {
|
||||
logrus.Errorf("error closing the progress reader: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
defer func() {
|
||||
@@ -232,6 +235,9 @@ func (p *v2Puller) pullV2Tag(tag, taggedName string) (verified bool, err error)
|
||||
// set the error. All successive reads/writes will return with this
|
||||
// error.
|
||||
pipeWriter.CloseWithError(errors.New("download canceled"))
|
||||
} else {
|
||||
// If no error then just close the pipe.
|
||||
pipeWriter.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
|
||||
@@ -29,13 +29,12 @@ func (s *TagStore) NewPusher(endpoint registry.APIEndpoint, localRepo Repository
|
||||
switch endpoint.Version {
|
||||
case registry.APIVersion2:
|
||||
return &v2Pusher{
|
||||
TagStore: s,
|
||||
endpoint: endpoint,
|
||||
localRepo: localRepo,
|
||||
repoInfo: repoInfo,
|
||||
config: imagePushConfig,
|
||||
sf: sf,
|
||||
layersSeen: make(map[string]bool),
|
||||
TagStore: s,
|
||||
endpoint: endpoint,
|
||||
localRepo: localRepo,
|
||||
repoInfo: repoInfo,
|
||||
config: imagePushConfig,
|
||||
sf: sf,
|
||||
}, nil
|
||||
case registry.APIVersion1:
|
||||
return &v1Pusher{
|
||||
|
||||
@@ -27,11 +27,6 @@ type v2Pusher struct {
|
||||
config *ImagePushConfig
|
||||
sf *streamformatter.StreamFormatter
|
||||
repo distribution.Repository
|
||||
|
||||
// layersSeen is the set of layers known to exist on the remote side.
|
||||
// This avoids redundant queries when pushing multiple tags that
|
||||
// involve the same layers.
|
||||
layersSeen map[string]bool
|
||||
}
|
||||
|
||||
func (p *v2Pusher) Push() (fallback bool, err error) {
|
||||
@@ -92,6 +87,8 @@ func (p *v2Pusher) pushV2Tag(tag string) error {
|
||||
return fmt.Errorf("tag does not exist: %s", tag)
|
||||
}
|
||||
|
||||
layersSeen := make(map[string]bool)
|
||||
|
||||
layer, err := p.graph.Get(layerId)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -120,7 +117,7 @@ func (p *v2Pusher) pushV2Tag(tag string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if p.layersSeen[layer.ID] {
|
||||
if layersSeen[layer.ID] {
|
||||
break
|
||||
}
|
||||
|
||||
@@ -141,7 +138,7 @@ func (p *v2Pusher) pushV2Tag(tag string) error {
|
||||
dgst, err := p.graph.GetDigest(layer.ID)
|
||||
switch err {
|
||||
case nil:
|
||||
_, err := p.repo.Blobs(nil).Stat(nil, dgst)
|
||||
_, err := p.repo.Blobs(context.Background()).Stat(context.Background(), dgst)
|
||||
switch err {
|
||||
case nil:
|
||||
exists = true
|
||||
@@ -161,7 +158,7 @@ func (p *v2Pusher) pushV2Tag(tag string) error {
|
||||
// if digest was empty or not saved, or if blob does not exist on the remote repository,
|
||||
// then fetch it.
|
||||
if !exists {
|
||||
if pushDigest, err := p.pushV2Image(p.repo.Blobs(nil), layer); err != nil {
|
||||
if pushDigest, err := p.pushV2Image(p.repo.Blobs(context.Background()), layer); err != nil {
|
||||
return err
|
||||
} else if pushDigest != dgst {
|
||||
// Cache new checksum
|
||||
@@ -175,7 +172,7 @@ func (p *v2Pusher) pushV2Tag(tag string) error {
|
||||
m.FSLayers = append(m.FSLayers, manifest.FSLayer{BlobSum: dgst})
|
||||
m.History = append(m.History, manifest.History{V1Compatibility: string(jsonData)})
|
||||
|
||||
p.layersSeen[layer.ID] = true
|
||||
layersSeen[layer.ID] = true
|
||||
}
|
||||
|
||||
logrus.Infof("Signed manifest for %s:%s using daemon's key: %s", p.repo.Name(), tag, p.trustKey.KeyID())
|
||||
@@ -229,7 +226,7 @@ func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (d
|
||||
|
||||
// Send the layer
|
||||
logrus.Debugf("rendered layer for %s of [%d] size", img.ID, size)
|
||||
layerUpload, err := bs.Create(nil)
|
||||
layerUpload, err := bs.Create(context.Background())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -253,7 +250,7 @@ func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (d
|
||||
}
|
||||
|
||||
desc := distribution.Descriptor{Digest: dgst}
|
||||
if _, err := layerUpload.Commit(nil, desc); err != nil {
|
||||
if _, err := layerUpload.Commit(context.Background(), desc); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
|
||||
@@ -20,21 +20,70 @@ APTDIR=$DOCKER_RELEASE_DIR/apt/repo
|
||||
# setup the apt repo (if it does not exist)
|
||||
mkdir -p "$APTDIR/conf" "$APTDIR/db"
|
||||
|
||||
# supported arches/sections
|
||||
arches=( amd64 i386 )
|
||||
components=( main testing experimental )
|
||||
|
||||
# create/update distributions file
|
||||
if [[ ! -f "$APTDIR/conf/distributions" ]]; then
|
||||
if [ ! -f "$APTDIR/conf/distributions" ]; then
|
||||
for suite in $(exec contrib/reprepro/suites.sh); do
|
||||
cat <<-EOF
|
||||
Origin: Docker
|
||||
Suite: $suite
|
||||
Codename: $suite
|
||||
Architectures: amd64 i386
|
||||
Components: main testing experimental
|
||||
Architectures: ${arches[*]}
|
||||
Components: ${components[*]}
|
||||
Description: Docker APT Repository
|
||||
|
||||
EOF
|
||||
done > "$APTDIR/conf/distributions"
|
||||
fi
|
||||
|
||||
# create/update distributions file
|
||||
if [ ! -f "$APTDIR/conf/apt-ftparchive.conf" ]; then
|
||||
cat <<-EOF > "$APTDIR/conf/apt-ftparchive.conf"
|
||||
Dir {
|
||||
ArchiveDir "${APTDIR}";
|
||||
CacheDir "${APTDIR}/db";
|
||||
};
|
||||
|
||||
Default {
|
||||
Packages::Compress ". gzip bzip2";
|
||||
Sources::Compress ". gzip bzip2";
|
||||
Contents::Compress ". gzip bzip2";
|
||||
};
|
||||
|
||||
TreeDefault {
|
||||
BinCacheDB "packages-\$(SECTION)-\$(ARCH).db";
|
||||
Directory "pool/\$(SECTION)";
|
||||
Packages "\$(DIST)/\$(SECTION)/binary-\$(ARCH)/Packages";
|
||||
SrcDirectory "pool/\$(SECTION)";
|
||||
Sources "\$(DIST)/\$(SECTION)/source/Sources";
|
||||
Contents "\$(DIST)/\$(SECTION)/Contents-\$(ARCH)";
|
||||
FileList "$APTDIR/\$(DIST)/\$(SECTION)/filelist";
|
||||
};
|
||||
EOF
|
||||
|
||||
for suite in $(exec contrib/reprepro/suites.sh); do
|
||||
cat <<-EOF
|
||||
Tree "dists/${suite}" {
|
||||
Sections "main testing experimental";
|
||||
Architectures "${arches[*]}";
|
||||
}
|
||||
|
||||
EOF
|
||||
done >> "$APTDIR/conf/apt-ftparchive.conf"
|
||||
fi
|
||||
|
||||
if [ ! -f "$APTDIR/conf/docker-engine-release.conf" ]; then
|
||||
cat <<-EOF > "$APTDIR/conf/docker-engine-release.conf"
|
||||
APT::FTPArchive::Release::Origin "Docker";
|
||||
APT::FTPArchive::Release::Components "${components[*]}";
|
||||
APT::FTPArchive::Release::Label "Docker APT Repository";
|
||||
APT::FTPArchive::Release::Architectures "${arches[*]}";
|
||||
EOF
|
||||
fi
|
||||
|
||||
# set the component and priority for the version being released
|
||||
component="main"
|
||||
priority=700
|
||||
@@ -67,4 +116,35 @@ for dir in contrib/builder/deb/*/; do
|
||||
reprepro -v --keepunreferencedfiles \
|
||||
-S docker-engine -P "$priority" -C "$component" \
|
||||
-b "$APTDIR" includedeb "$codename" "${DEBFILE[@]}"
|
||||
|
||||
# update the filelist for this codename/component
|
||||
find "$APTDIR/pool/$component" \
|
||||
-name *~${codename#*-}*.deb > "$APTDIR/dists/$codename/$component/filelist"
|
||||
done
|
||||
|
||||
|
||||
# run the apt-ftparchive commands so we can have pinning
|
||||
apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf"
|
||||
|
||||
for dir in contrib/builder/deb/*/; do
|
||||
version="$(basename "$dir")"
|
||||
codename="${version//debootstrap-}"
|
||||
|
||||
apt-ftparchive \
|
||||
-o "APT::FTPArchive::Release::Codename=$codename" \
|
||||
-o "APT::FTPArchive::Release::Suite=$codename" \
|
||||
-c "$APTDIR/conf/docker-engine-release.conf" \
|
||||
release \
|
||||
"$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release"
|
||||
|
||||
for arch in "${arches[@]}"; do
|
||||
apt-ftparchive \
|
||||
-o "APT::FTPArchive::Release::Codename=$codename" \
|
||||
-o "APT::FTPArchive::Release::Suite=$codename" \
|
||||
-o "APT::FTPArchive::Release::Component=$component" \
|
||||
-o "APT::FTPArchive::Release::Architecture=$arch" \
|
||||
-c "$APTDIR/conf/docker-engine-release.conf" \
|
||||
release \
|
||||
"$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release"
|
||||
done
|
||||
done
|
||||
|
||||
@@ -21,29 +21,29 @@ clone git golang.org/x/net 3cffabab72adf04f8e3b01c5baf775361837b5fe https://gith
|
||||
clone hg code.google.com/p/gosqlite 74691fb6f837
|
||||
|
||||
#get libnetwork packages
|
||||
clone git github.com/docker/libnetwork bd3eecc96f3c05a4acef1bedcf74397bc6850d22
|
||||
clone git github.com/docker/libnetwork bc565c2d295067c1a43674a23a473ec6336d7fd4
|
||||
clone git github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
|
||||
clone git github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
|
||||
clone git github.com/hashicorp/memberlist 9a1e242e454d2443df330bdd51a436d5a9058fc4
|
||||
clone git github.com/hashicorp/serf 7151adcef72687bf95f451a2e0ba15cb19412bf2
|
||||
clone git github.com/docker/libkv 60c7c881345b3c67defc7f93a8297debf041d43c
|
||||
clone git github.com/vishvananda/netns 493029407eeb434d0c2d44e02ea072ff2488d322
|
||||
clone git github.com/vishvananda/netlink 20397a138846e4d6590e01783ed023ed7e1c38a6
|
||||
clone git github.com/vishvananda/netlink 4b5dce31de6d42af5bb9811c6d265472199e0fec
|
||||
clone git github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
|
||||
clone git github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
|
||||
clone git github.com/coreos/go-etcd v2.0.0
|
||||
clone git github.com/hashicorp/consul v0.5.2
|
||||
|
||||
# get graph and distribution packages
|
||||
clone git github.com/docker/distribution 7dc8d4a26b689bd4892f2f2322dbce0b7119d686
|
||||
clone git github.com/vbatts/tar-split v0.9.4
|
||||
clone git github.com/docker/distribution ec87e9b6971d831f0eff752ddb54fb64693e51cd # docker/1.8 branch
|
||||
clone git github.com/vbatts/tar-split v0.9.6
|
||||
|
||||
clone git github.com/docker/notary 8e8122eb5528f621afcd4e2854c47302f17392f7
|
||||
clone git github.com/endophage/gotuf 89ceb27829b9353dfee5ccccf7a3a9bb77008b05
|
||||
clone git github.com/endophage/gotuf a592b03b28b02bb29bb5878308fb1abed63383b5
|
||||
clone git github.com/tent/canonical-json-go 96e4ba3a7613a1216cbd1badca4efe382adea337
|
||||
clone git github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c
|
||||
|
||||
clone git github.com/opencontainers/runc v0.0.2 # libcontainer
|
||||
clone git github.com/opencontainers/runc v0.0.2.1 # libcontainer
|
||||
# libcontainer deps (see src/github.com/docker/libcontainer/update-vendor.sh)
|
||||
clone git github.com/coreos/go-systemd v2
|
||||
clone git github.com/godbus/dbus v2
|
||||
|
||||
@@ -2,7 +2,10 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -554,3 +557,23 @@ func (s *DockerSuite) TestPsFormatHeaders(c *check.C) {
|
||||
c.Fatalf(`Expected 'NAMES\ntest\n', got %v`, out)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestPsDefaultFormatAndQuiet(c *check.C) {
|
||||
config := `{
|
||||
"psFormat": "{{ .ID }} default"
|
||||
}`
|
||||
d, err := ioutil.TempDir("", "integration-cli-")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(d)
|
||||
|
||||
err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
out, _ := dockerCmd(c, "run", "--name=test", "-d", "busybox", "top")
|
||||
id := strings.TrimSpace(out)
|
||||
|
||||
out, _ = dockerCmd(c, "--config", d, "ps", "-q")
|
||||
if !strings.HasPrefix(id, strings.TrimSpace(out)) {
|
||||
c.Fatalf("Expected to print only the container id, got %v\n", out)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -369,3 +369,40 @@ func (s *DockerTrustSuite) TestTrustedPullWithExpiredSnapshot(c *check.C) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Test that pull continues after client has disconnected. #15589
|
||||
func (s *DockerTrustSuite) TestPullClientDisconnect(c *check.C) {
|
||||
testRequires(c, Network)
|
||||
|
||||
repoName := "hello-world:latest"
|
||||
|
||||
dockerCmdWithError(c, "rmi", repoName) // clean just in case
|
||||
|
||||
pullCmd := exec.Command(dockerBinary, "pull", repoName)
|
||||
|
||||
stdout, err := pullCmd.StdoutPipe()
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
err = pullCmd.Start()
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// cancel as soon as we get some output
|
||||
buf := make([]byte, 10)
|
||||
_, err = stdout.Read(buf)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
err = pullCmd.Process.Kill()
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
maxAttempts := 20
|
||||
for i := 0; ; i++ {
|
||||
if _, _, err := dockerCmdWithError(c, "inspect", repoName); err == nil {
|
||||
break
|
||||
}
|
||||
if i >= maxAttempts {
|
||||
c.Fatal("Timeout reached. Image was not pulled after client disconnected.")
|
||||
}
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -60,31 +60,40 @@ func (s *DockerRegistrySuite) TestPushMultipleTags(c *check.C) {
|
||||
|
||||
dockerCmd(c, "tag", "busybox", repoTag2)
|
||||
|
||||
out, _ := dockerCmd(c, "push", repoName)
|
||||
dockerCmd(c, "push", repoName)
|
||||
|
||||
// There should be no duplicate hashes in the output
|
||||
imageSuccessfullyPushed := ": Image successfully pushed"
|
||||
// Ensure layer list is equivalent for repoTag1 and repoTag2
|
||||
out1, _ := dockerCmd(c, "pull", repoTag1)
|
||||
if strings.Contains(out1, "Tag t1 not found") {
|
||||
c.Fatalf("Unable to pull pushed image: %s", out1)
|
||||
}
|
||||
imageAlreadyExists := ": Image already exists"
|
||||
imagePushHashes := make(map[string]struct{})
|
||||
outputLines := strings.Split(out, "\n")
|
||||
for _, outputLine := range outputLines {
|
||||
if strings.Contains(outputLine, imageSuccessfullyPushed) {
|
||||
hash := strings.TrimSuffix(outputLine, imageSuccessfullyPushed)
|
||||
if _, present := imagePushHashes[hash]; present {
|
||||
c.Fatalf("Duplicate image push: %s", outputLine)
|
||||
}
|
||||
imagePushHashes[hash] = struct{}{}
|
||||
} else if strings.Contains(outputLine, imageAlreadyExists) {
|
||||
hash := strings.TrimSuffix(outputLine, imageAlreadyExists)
|
||||
if _, present := imagePushHashes[hash]; present {
|
||||
c.Fatalf("Duplicate image push: %s", outputLine)
|
||||
}
|
||||
imagePushHashes[hash] = struct{}{}
|
||||
var out1Lines []string
|
||||
for _, outputLine := range strings.Split(out1, "\n") {
|
||||
if strings.Contains(outputLine, imageAlreadyExists) {
|
||||
out1Lines = append(out1Lines, outputLine)
|
||||
}
|
||||
}
|
||||
|
||||
if len(imagePushHashes) == 0 {
|
||||
c.Fatal(`Expected at least one line containing "Image successfully pushed"`)
|
||||
out2, _ := dockerCmd(c, "pull", repoTag2)
|
||||
if strings.Contains(out2, "Tag t2 not found") {
|
||||
c.Fatalf("Unable to pull pushed image: %s", out1)
|
||||
}
|
||||
var out2Lines []string
|
||||
for _, outputLine := range strings.Split(out2, "\n") {
|
||||
if strings.Contains(outputLine, imageAlreadyExists) {
|
||||
out1Lines = append(out1Lines, outputLine)
|
||||
}
|
||||
}
|
||||
|
||||
if len(out1Lines) != len(out2Lines) {
|
||||
c.Fatalf("Mismatched output length:\n%s\n%s", out1, out2)
|
||||
}
|
||||
|
||||
for i := range out1Lines {
|
||||
if out1Lines[i] != out2Lines[i] {
|
||||
c.Fatalf("Mismatched output line:\n%s\n%s", out1Lines[i], out2Lines[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ its own man page which explain usage and arguments.
|
||||
To see the man page for a command run **man docker <command>**.
|
||||
|
||||
# OPTIONS
|
||||
**-h**, **--help**
|
||||
**--help**
|
||||
Print usage statement
|
||||
|
||||
**--api-cors-header**=""
|
||||
|
||||
@@ -359,25 +359,18 @@ type blobs struct {
|
||||
distribution.BlobDeleter
|
||||
}
|
||||
|
||||
func sanitizeLocation(location, source string) (string, error) {
|
||||
func sanitizeLocation(location, base string) (string, error) {
|
||||
baseURL, err := url.Parse(base)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
locationURL, err := url.Parse(location)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if locationURL.Scheme == "" {
|
||||
sourceURL, err := url.Parse(source)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
locationURL = &url.URL{
|
||||
Scheme: sourceURL.Scheme,
|
||||
Host: sourceURL.Host,
|
||||
Path: location,
|
||||
}
|
||||
location = locationURL.String()
|
||||
}
|
||||
return location, nil
|
||||
return baseURL.ResolveReference(locationURL).String(), nil
|
||||
}
|
||||
|
||||
func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"github.com/docker/libnetwork/netutils"
|
||||
"github.com/docker/libnetwork/options"
|
||||
"github.com/docker/libnetwork/portmapper"
|
||||
"github.com/docker/libnetwork/sandbox"
|
||||
"github.com/docker/libnetwork/types"
|
||||
"github.com/vishvananda/netlink"
|
||||
)
|
||||
@@ -544,6 +545,8 @@ func (d *driver) getNetworks() []*bridgeNetwork {
|
||||
func (d *driver) CreateNetwork(id types.UUID, option map[string]interface{}) error {
|
||||
var err error
|
||||
|
||||
defer sandbox.InitOSContext()()
|
||||
|
||||
// Sanity checks
|
||||
d.Lock()
|
||||
if _, ok := d.networks[id]; ok {
|
||||
@@ -695,6 +698,8 @@ func (d *driver) CreateNetwork(id types.UUID, option map[string]interface{}) err
|
||||
func (d *driver) DeleteNetwork(nid types.UUID) error {
|
||||
var err error
|
||||
|
||||
defer sandbox.InitOSContext()()
|
||||
|
||||
// Get network handler and remove it from driver
|
||||
d.Lock()
|
||||
n, ok := d.networks[nid]
|
||||
@@ -822,6 +827,8 @@ func (d *driver) CreateEndpoint(nid, eid types.UUID, epInfo driverapi.EndpointIn
|
||||
err error
|
||||
)
|
||||
|
||||
defer sandbox.InitOSContext()()
|
||||
|
||||
if epInfo == nil {
|
||||
return errors.New("invalid endpoint info passed")
|
||||
}
|
||||
@@ -1029,6 +1036,8 @@ func (d *driver) CreateEndpoint(nid, eid types.UUID, epInfo driverapi.EndpointIn
|
||||
func (d *driver) DeleteEndpoint(nid, eid types.UUID) error {
|
||||
var err error
|
||||
|
||||
defer sandbox.InitOSContext()()
|
||||
|
||||
// Get the network handler and make sure it exists
|
||||
d.Lock()
|
||||
n, ok := d.networks[nid]
|
||||
@@ -1168,6 +1177,8 @@ func (d *driver) EndpointOperInfo(nid, eid types.UUID) (map[string]interface{},
|
||||
|
||||
// Join method is invoked when a Sandbox is attached to an endpoint.
|
||||
func (d *driver) Join(nid, eid types.UUID, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error {
|
||||
defer sandbox.InitOSContext()()
|
||||
|
||||
network, err := d.getNetwork(nid)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1211,6 +1222,8 @@ func (d *driver) Join(nid, eid types.UUID, sboxKey string, jinfo driverapi.JoinI
|
||||
|
||||
// Leave method is invoked when a Sandbox detaches from an endpoint.
|
||||
func (d *driver) Leave(nid, eid types.UUID) error {
|
||||
defer sandbox.InitOSContext()()
|
||||
|
||||
network, err := d.getNetwork(nid)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/libnetwork/netutils"
|
||||
"github.com/docker/libnetwork/sandbox"
|
||||
"github.com/docker/libnetwork/types"
|
||||
"github.com/vishvananda/netlink"
|
||||
)
|
||||
@@ -21,6 +22,8 @@ func validateID(nid, eid types.UUID) error {
|
||||
}
|
||||
|
||||
func createVethPair() (string, string, error) {
|
||||
defer sandbox.InitOSContext()()
|
||||
|
||||
// Generate a name for what will be the host side pipe interface
|
||||
name1, err := netutils.GenerateIfaceName(vethPrefix, vethLen)
|
||||
if err != nil {
|
||||
@@ -45,6 +48,8 @@ func createVethPair() (string, string, error) {
|
||||
}
|
||||
|
||||
func createVxlan(vni uint32) (string, error) {
|
||||
defer sandbox.InitOSContext()()
|
||||
|
||||
name, err := netutils.GenerateIfaceName("vxlan", 7)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error generating vxlan name: %v", err)
|
||||
@@ -68,6 +73,8 @@ func createVxlan(vni uint32) (string, error) {
|
||||
}
|
||||
|
||||
func deleteVxlan(name string) error {
|
||||
defer sandbox.InitOSContext()()
|
||||
|
||||
link, err := netlink.LinkByName(name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to find vxlan interface with name %s: %v", name, err)
|
||||
|
||||
@@ -1,41 +0,0 @@
|
||||
package netutils
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var runningInContainer = flag.Bool("incontainer", false, "Indicates if the test is running in a container")
|
||||
|
||||
// IsRunningInContainer returns whether the test is running inside a container.
|
||||
func IsRunningInContainer() bool {
|
||||
return (*runningInContainer)
|
||||
}
|
||||
|
||||
// SetupTestNetNS joins a new network namespace, and returns its associated
|
||||
// teardown function.
|
||||
//
|
||||
// Example usage:
|
||||
//
|
||||
// defer SetupTestNetNS(t)()
|
||||
//
|
||||
func SetupTestNetNS(t *testing.T) func() {
|
||||
runtime.LockOSThread()
|
||||
if err := syscall.Unshare(syscall.CLONE_NEWNET); err != nil {
|
||||
t.Fatalf("Failed to enter netns: %v", err)
|
||||
}
|
||||
|
||||
fd, err := syscall.Open("/proc/self/ns/net", syscall.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
t.Fatal("Failed to open netns file")
|
||||
}
|
||||
|
||||
return func() {
|
||||
if err := syscall.Close(fd); err != nil {
|
||||
t.Logf("Warning: netns closing failed (%v)", err)
|
||||
}
|
||||
runtime.UnlockOSThread()
|
||||
}
|
||||
}
|
||||
@@ -26,6 +26,8 @@ var (
|
||||
gpmWg sync.WaitGroup
|
||||
gpmCleanupPeriod = 60 * time.Second
|
||||
gpmChan = make(chan chan struct{})
|
||||
nsOnce sync.Once
|
||||
initNs netns.NsHandle
|
||||
)
|
||||
|
||||
// The networkNamespace type is the linux implementation of the Sandbox
|
||||
@@ -242,15 +244,37 @@ func (n *networkNamespace) InvokeFunc(f func()) error {
|
||||
})
|
||||
}
|
||||
|
||||
func nsInvoke(path string, prefunc func(nsFD int) error, postfunc func(callerFD int) error) error {
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
func getLink() (string, error) {
|
||||
return os.Readlink(fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), syscall.Gettid()))
|
||||
}
|
||||
|
||||
origns, err := netns.Get()
|
||||
if err != nil {
|
||||
return err
|
||||
func nsInit() {
|
||||
var err error
|
||||
|
||||
if initNs, err = netns.Get(); err != nil {
|
||||
log.Errorf("could not get initial namespace: %v", err)
|
||||
}
|
||||
defer origns.Close()
|
||||
}
|
||||
|
||||
// InitOSContext initializes OS context while configuring network resources
|
||||
func InitOSContext() func() {
|
||||
runtime.LockOSThread()
|
||||
nsOnce.Do(nsInit)
|
||||
if err := netns.Set(initNs); err != nil {
|
||||
linkInfo, linkErr := getLink()
|
||||
if linkErr != nil {
|
||||
linkInfo = linkErr.Error()
|
||||
}
|
||||
|
||||
log.Errorf("failed to set to initial namespace, %v, initns fd %d: %v",
|
||||
linkInfo, initNs, err)
|
||||
}
|
||||
|
||||
return runtime.UnlockOSThread
|
||||
}
|
||||
|
||||
func nsInvoke(path string, prefunc func(nsFD int) error, postfunc func(callerFD int) error) error {
|
||||
defer InitOSContext()()
|
||||
|
||||
f, err := os.OpenFile(path, os.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
@@ -269,10 +293,10 @@ func nsInvoke(path string, prefunc func(nsFD int) error, postfunc func(callerFD
|
||||
if err = netns.Set(netns.NsHandle(nsFD)); err != nil {
|
||||
return err
|
||||
}
|
||||
defer netns.Set(origns)
|
||||
defer netns.Set(initNs)
|
||||
|
||||
// Invoked after the namespace switch.
|
||||
return postfunc(int(origns))
|
||||
return postfunc(int(initNs))
|
||||
}
|
||||
|
||||
func (n *networkNamespace) nsPath() string {
|
||||
|
||||
@@ -21,3 +21,8 @@ func NewSandbox(key string, osCreate bool) (Sandbox, error) {
|
||||
// and waits for it.
|
||||
func GC() {
|
||||
}
|
||||
|
||||
// InitOSContext initializes OS context while configuring network resources
|
||||
func InitOSContext() func() {
|
||||
return func() {}
|
||||
}
|
||||
|
||||
@@ -21,3 +21,8 @@ func NewSandbox(key string, osCreate bool) (Sandbox, error) {
|
||||
// and waits for it.
|
||||
func GC() {
|
||||
}
|
||||
|
||||
// InitOSContext initializes OS context while configuring network resources
|
||||
func InitOSContext() func() {
|
||||
return func() {}
|
||||
}
|
||||
|
||||
@@ -78,7 +78,7 @@ func (c *Client) update() error {
|
||||
if err != nil {
|
||||
// In this instance the root has not expired base on time, but is
|
||||
// expired based on the snapshot dictating a new root has been produced.
|
||||
logrus.Info(err.Error())
|
||||
logrus.Debug(err)
|
||||
return tuf.ErrLocalRootExpired{}
|
||||
}
|
||||
// will always need top level targets at a minimum
|
||||
|
||||
@@ -3,6 +3,7 @@ package fluent
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net"
|
||||
"reflect"
|
||||
@@ -33,7 +34,7 @@ type Config struct {
|
||||
|
||||
type Fluent struct {
|
||||
Config
|
||||
conn net.Conn
|
||||
conn io.WriteCloser
|
||||
pending []byte
|
||||
reconnecting bool
|
||||
mu sync.Mutex
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
package fluent
|
||||
|
||||
const Version = "0.5.1"
|
||||
const Version = "1.0.0"
|
||||
|
||||
@@ -236,7 +236,7 @@ func getCgroupData(c *configs.Cgroup, pid int) (*data, error) {
|
||||
}
|
||||
|
||||
func (raw *data) parent(subsystem, mountpoint, src string) (string, error) {
|
||||
initPath, err := cgroups.GetInitCgroupDir(subsystem)
|
||||
initPath, err := cgroups.GetThisCgroupDir(subsystem)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
@@ -159,17 +159,19 @@ func (tr *Reader) Next() (*Header, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var b []byte
|
||||
var buf []byte
|
||||
if tr.RawAccounting {
|
||||
if _, err = tr.rawBytes.Write(realname); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b = tr.RawBytes()
|
||||
buf = make([]byte, tr.rawBytes.Len())
|
||||
copy(buf[:], tr.RawBytes())
|
||||
}
|
||||
hdr, err := tr.Next()
|
||||
// since the above call to Next() resets the buffer, we need to throw the bytes over
|
||||
if tr.RawAccounting {
|
||||
if _, err = tr.rawBytes.Write(b); err != nil {
|
||||
buf = append(buf, tr.RawBytes()...)
|
||||
if _, err = tr.rawBytes.Write(buf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
@@ -181,17 +183,19 @@ func (tr *Reader) Next() (*Header, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var b []byte
|
||||
var buf []byte
|
||||
if tr.RawAccounting {
|
||||
if _, err = tr.rawBytes.Write(realname); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b = tr.RawBytes()
|
||||
buf = make([]byte, tr.rawBytes.Len())
|
||||
copy(buf[:], tr.RawBytes())
|
||||
}
|
||||
hdr, err := tr.Next()
|
||||
// since the above call to Next() resets the buffer, we need to throw the bytes over
|
||||
if tr.RawAccounting {
|
||||
if _, err = tr.rawBytes.Write(b); err != nil {
|
||||
buf = append(buf, tr.RawBytes()...)
|
||||
if _, err = tr.rawBytes.Write(buf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
"github.com/vbatts/tar-split/tar/storage"
|
||||
)
|
||||
|
||||
// NewOutputTarStream returns an io.ReadCloser that is an assemble tar archive
|
||||
// NewOutputTarStream returns an io.ReadCloser that is an assembled tar archive
|
||||
// stream.
|
||||
//
|
||||
// It takes a storage.FileGetter, for mapping the file payloads that are to be read in,
|
||||
@@ -62,7 +62,6 @@ func NewOutputTarStream(fg storage.FileGetter, up storage.Unpacker) io.ReadClose
|
||||
fh.Close()
|
||||
}
|
||||
}
|
||||
pw.Close()
|
||||
}()
|
||||
return pr
|
||||
}
|
||||
|
||||
@@ -22,8 +22,8 @@ func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io
|
||||
// What to do here... folks will want their own access to the Reader that is
|
||||
// their tar archive stream, but we'll need that same stream to use our
|
||||
// forked 'archive/tar'.
|
||||
// Perhaps do an io.TeeReader that hand back an io.Reader for them to read
|
||||
// from, and we'll mitm the stream to store metadata.
|
||||
// Perhaps do an io.TeeReader that hands back an io.Reader for them to read
|
||||
// from, and we'll MITM the stream to store metadata.
|
||||
// We'll need a storage.FilePutter too ...
|
||||
|
||||
// Another concern, whether to do any storage.FilePutter operations, such that we
|
||||
@@ -32,7 +32,7 @@ func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io
|
||||
// Perhaps we have a DiscardFilePutter that is a bit bucket.
|
||||
|
||||
// we'll return the pipe reader, since TeeReader does not buffer and will
|
||||
// only read what the outputRdr Read's. Since Tar archive's have padding on
|
||||
// only read what the outputRdr Read's. Since Tar archives have padding on
|
||||
// the end, we want to be the one reading the padding, even if the user's
|
||||
// `archive/tar` doesn't care.
|
||||
pR, pW := io.Pipe()
|
||||
@@ -55,13 +55,15 @@ func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io
|
||||
}
|
||||
// even when an EOF is reached, there is often 1024 null bytes on
|
||||
// the end of an archive. Collect them too.
|
||||
_, err := p.AddEntry(storage.Entry{
|
||||
Type: storage.SegmentType,
|
||||
Payload: tr.RawBytes(),
|
||||
})
|
||||
if err != nil {
|
||||
pW.CloseWithError(err)
|
||||
return
|
||||
if b := tr.RawBytes(); len(b) > 0 {
|
||||
_, err := p.AddEntry(storage.Entry{
|
||||
Type: storage.SegmentType,
|
||||
Payload: b,
|
||||
})
|
||||
if err != nil {
|
||||
pW.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
break // not return. We need the end of the reader.
|
||||
}
|
||||
@@ -69,12 +71,15 @@ func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io
|
||||
break // not return. We need the end of the reader.
|
||||
}
|
||||
|
||||
if _, err := p.AddEntry(storage.Entry{
|
||||
Type: storage.SegmentType,
|
||||
Payload: tr.RawBytes(),
|
||||
}); err != nil {
|
||||
pW.CloseWithError(err)
|
||||
return
|
||||
if b := tr.RawBytes(); len(b) > 0 {
|
||||
_, err := p.AddEntry(storage.Entry{
|
||||
Type: storage.SegmentType,
|
||||
Payload: b,
|
||||
})
|
||||
if err != nil {
|
||||
pW.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var csum []byte
|
||||
|
||||
@@ -5,7 +5,7 @@ Packing and unpacking the Entries of the stream. The types of streams are
|
||||
either segments of raw bytes (for the raw headers and various padding) and for
|
||||
an entry marking a file payload.
|
||||
|
||||
The raw bytes are stored precisely in the packed (marshalled) Entry. Where as
|
||||
The raw bytes are stored precisely in the packed (marshalled) Entry, whereas
|
||||
the file payload marker include the name of the file, size, and crc64 checksum
|
||||
(for basic file integrity).
|
||||
*/
|
||||
|
||||
@@ -19,11 +19,11 @@ const (
|
||||
// SegmentType represents a raw bytes segment from the archive stream. These raw
|
||||
// byte segments consist of the raw headers and various padding.
|
||||
//
|
||||
// It's payload is to be marshalled base64 encoded.
|
||||
// Its payload is to be marshalled base64 encoded.
|
||||
SegmentType
|
||||
)
|
||||
|
||||
// Entry is a the structure for packing and unpacking the information read from
|
||||
// Entry is the structure for packing and unpacking the information read from
|
||||
// the Tar archive.
|
||||
//
|
||||
// FileType Payload checksum is using `hash/crc64` for basic file integrity,
|
||||
@@ -32,8 +32,8 @@ const (
|
||||
// collisions in a sample of 18.2 million, CRC64 had none.
|
||||
type Entry struct {
|
||||
Type Type `json:"type"`
|
||||
Name string `json:"name",omitempty`
|
||||
Size int64 `json:"size",omitempty`
|
||||
Payload []byte `json:"payload"` // SegmentType store payload here; FileType store crc64 checksum here;
|
||||
Name string `json:"name,omitempty"`
|
||||
Size int64 `json:"size,omitempty"`
|
||||
Payload []byte `json:"payload"` // SegmentType stores payload here; FileType stores crc64 checksum here;
|
||||
Position int `json:"position"`
|
||||
}
|
||||
|
||||
@@ -5,14 +5,13 @@ import (
|
||||
"errors"
|
||||
"hash/crc64"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// FileGetter is the interface for getting a stream of a file payload, address
|
||||
// by name/filename. Presumably, the names will be scoped to relative file
|
||||
// paths.
|
||||
// FileGetter is the interface for getting a stream of a file payload,
|
||||
// addressed by name/filename. Presumably, the names will be scoped to relative
|
||||
// file paths.
|
||||
type FileGetter interface {
|
||||
// Get returns a stream for the provided file path
|
||||
Get(filename string) (output io.ReadCloser, err error)
|
||||
@@ -60,15 +59,15 @@ func (bfgp bufferFileGetPutter) Get(name string) (io.ReadCloser, error) {
|
||||
}
|
||||
|
||||
func (bfgp *bufferFileGetPutter) Put(name string, r io.Reader) (int64, []byte, error) {
|
||||
c := crc64.New(CRCTable)
|
||||
tRdr := io.TeeReader(r, c)
|
||||
b := bytes.NewBuffer([]byte{})
|
||||
i, err := io.Copy(b, tRdr)
|
||||
crc := crc64.New(CRCTable)
|
||||
buf := bytes.NewBuffer(nil)
|
||||
cw := io.MultiWriter(crc, buf)
|
||||
i, err := io.Copy(cw, r)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
bfgp.files[name] = b.Bytes()
|
||||
return i, c.Sum(nil), nil
|
||||
bfgp.files[name] = buf.Bytes()
|
||||
return i, crc.Sum(nil), nil
|
||||
}
|
||||
|
||||
type readCloserWrapper struct {
|
||||
@@ -77,7 +76,7 @@ type readCloserWrapper struct {
|
||||
|
||||
func (w *readCloserWrapper) Close() error { return nil }
|
||||
|
||||
// NewBufferFileGetPutter is simple in memory FileGetPutter
|
||||
// NewBufferFileGetPutter is a simple in-memory FileGetPutter
|
||||
//
|
||||
// Implication is this is memory intensive...
|
||||
// Probably best for testing or light weight cases.
|
||||
@@ -97,8 +96,7 @@ type bitBucketFilePutter struct {
|
||||
|
||||
func (bbfp *bitBucketFilePutter) Put(name string, r io.Reader) (int64, []byte, error) {
|
||||
c := crc64.New(CRCTable)
|
||||
tRdr := io.TeeReader(r, c)
|
||||
i, err := io.Copy(ioutil.Discard, tRdr)
|
||||
i, err := io.Copy(c, r)
|
||||
return i, c.Sum(nil), err
|
||||
}
|
||||
|
||||
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// ErrDuplicatePath is occured when a tar archive has more than one entry for
|
||||
// the same file path
|
||||
// ErrDuplicatePath occurs when a tar archive has more than one entry for the
|
||||
// same file path
|
||||
var ErrDuplicatePath = errors.New("duplicates of file paths not supported")
|
||||
|
||||
// Packer describes the methods to pack Entries to a storage destination
|
||||
@@ -65,7 +65,7 @@ func (jup *jsonUnpacker) Next() (*Entry, error) {
|
||||
if _, ok := jup.seen[cName]; ok {
|
||||
return nil, ErrDuplicatePath
|
||||
}
|
||||
jup.seen[cName] = emptyByte
|
||||
jup.seen[cName] = struct{}{}
|
||||
}
|
||||
|
||||
return &e, err
|
||||
@@ -90,11 +90,7 @@ type jsonPacker struct {
|
||||
seen seenNames
|
||||
}
|
||||
|
||||
type seenNames map[string]byte
|
||||
|
||||
// used in the seenNames map. byte is a uint8, and we'll re-use the same one
|
||||
// for minimalism.
|
||||
const emptyByte byte = 0
|
||||
type seenNames map[string]struct{}
|
||||
|
||||
func (jp *jsonPacker) AddEntry(e Entry) (int, error) {
|
||||
// check early for dup name
|
||||
@@ -103,7 +99,7 @@ func (jp *jsonPacker) AddEntry(e Entry) (int, error) {
|
||||
if _, ok := jp.seen[cName]; ok {
|
||||
return -1, ErrDuplicatePath
|
||||
}
|
||||
jp.seen[cName] = emptyByte
|
||||
jp.seen[cName] = struct{}{}
|
||||
}
|
||||
|
||||
e.Position = jp.pos
|
||||
@@ -117,7 +113,7 @@ func (jp *jsonPacker) AddEntry(e Entry) (int, error) {
|
||||
return e.Position, nil
|
||||
}
|
||||
|
||||
// NewJSONPacker provides an Packer that writes each Entry (SegmentType and
|
||||
// NewJSONPacker provides a Packer that writes each Entry (SegmentType and
|
||||
// FileType) as a json document.
|
||||
//
|
||||
// The Entries are delimited by new line.
|
||||
|
||||
@@ -157,6 +157,7 @@ type Vxlan struct {
|
||||
L2miss bool
|
||||
L3miss bool
|
||||
NoAge bool
|
||||
GBP bool
|
||||
Age int
|
||||
Limit int
|
||||
Port int
|
||||
|
||||
@@ -73,10 +73,7 @@ func LinkSetMTU(link Link, mtu int) error {
|
||||
req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
|
||||
|
||||
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
|
||||
msg.Type = syscall.RTM_SETLINK
|
||||
msg.Flags = syscall.NLM_F_REQUEST
|
||||
msg.Index = int32(base.Index)
|
||||
msg.Change = syscall.IFLA_MTU
|
||||
req.AddData(msg)
|
||||
|
||||
b := make([]byte, 4)
|
||||
@@ -97,10 +94,7 @@ func LinkSetName(link Link, name string) error {
|
||||
req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
|
||||
|
||||
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
|
||||
msg.Type = syscall.RTM_SETLINK
|
||||
msg.Flags = syscall.NLM_F_REQUEST
|
||||
msg.Index = int32(base.Index)
|
||||
msg.Change = syscall.IFLA_IFNAME
|
||||
req.AddData(msg)
|
||||
|
||||
data := nl.NewRtAttr(syscall.IFLA_IFNAME, []byte(name))
|
||||
@@ -118,10 +112,7 @@ func LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error {
|
||||
req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
|
||||
|
||||
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
|
||||
msg.Type = syscall.RTM_SETLINK
|
||||
msg.Flags = syscall.NLM_F_REQUEST
|
||||
msg.Index = int32(base.Index)
|
||||
msg.Change = syscall.IFLA_ADDRESS
|
||||
req.AddData(msg)
|
||||
|
||||
data := nl.NewRtAttr(syscall.IFLA_ADDRESS, []byte(hwaddr))
|
||||
@@ -151,10 +142,7 @@ func LinkSetMasterByIndex(link Link, masterIndex int) error {
|
||||
req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
|
||||
|
||||
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
|
||||
msg.Type = syscall.RTM_SETLINK
|
||||
msg.Flags = syscall.NLM_F_REQUEST
|
||||
msg.Index = int32(base.Index)
|
||||
msg.Change = syscall.IFLA_MASTER
|
||||
req.AddData(msg)
|
||||
|
||||
b := make([]byte, 4)
|
||||
@@ -176,10 +164,7 @@ func LinkSetNsPid(link Link, nspid int) error {
|
||||
req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
|
||||
|
||||
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
|
||||
msg.Type = syscall.RTM_SETLINK
|
||||
msg.Flags = syscall.NLM_F_REQUEST
|
||||
msg.Index = int32(base.Index)
|
||||
msg.Change = syscall.IFLA_NET_NS_PID
|
||||
req.AddData(msg)
|
||||
|
||||
b := make([]byte, 4)
|
||||
@@ -201,10 +186,7 @@ func LinkSetNsFd(link Link, fd int) error {
|
||||
req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
|
||||
|
||||
msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
|
||||
msg.Type = syscall.RTM_SETLINK
|
||||
msg.Flags = syscall.NLM_F_REQUEST
|
||||
msg.Index = int32(base.Index)
|
||||
msg.Change = nl.IFLA_NET_NS_FD
|
||||
req.AddData(msg)
|
||||
|
||||
b := make([]byte, 4)
|
||||
@@ -266,6 +248,10 @@ func addVxlanAttrs(vxlan *Vxlan, linkInfo *nl.RtAttr) {
|
||||
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L2MISS, boolAttr(vxlan.L2miss))
|
||||
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L3MISS, boolAttr(vxlan.L3miss))
|
||||
|
||||
if vxlan.GBP {
|
||||
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GBP, boolAttr(vxlan.GBP))
|
||||
}
|
||||
|
||||
if vxlan.NoAge {
|
||||
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(0))
|
||||
} else if vxlan.Age > 0 {
|
||||
@@ -627,10 +613,7 @@ func setProtinfoAttr(link Link, mode bool, attr int) error {
|
||||
req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
|
||||
|
||||
msg := nl.NewIfInfomsg(syscall.AF_BRIDGE)
|
||||
msg.Type = syscall.RTM_SETLINK
|
||||
msg.Flags = syscall.NLM_F_REQUEST
|
||||
msg.Index = int32(base.Index)
|
||||
msg.Change = syscall.IFLA_PROTINFO | syscall.NLA_F_NESTED
|
||||
req.AddData(msg)
|
||||
|
||||
br := nl.NewRtAttr(syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED, nil)
|
||||
@@ -683,6 +666,8 @@ func parseVxlanData(link Link, data []syscall.NetlinkRouteAttr) {
|
||||
vxlan.L2miss = int8(datum.Value[0]) != 0
|
||||
case nl.IFLA_VXLAN_L3MISS:
|
||||
vxlan.L3miss = int8(datum.Value[0]) != 0
|
||||
case nl.IFLA_VXLAN_GBP:
|
||||
vxlan.GBP = int8(datum.Value[0]) != 0
|
||||
case nl.IFLA_VXLAN_AGEING:
|
||||
vxlan.Age = int(native.Uint32(datum.Value[0:4]))
|
||||
vxlan.NoAge = vxlan.Age == 0
|
||||
|
||||
@@ -47,7 +47,15 @@ const (
|
||||
IFLA_VXLAN_PORT
|
||||
IFLA_VXLAN_GROUP6
|
||||
IFLA_VXLAN_LOCAL6
|
||||
IFLA_VXLAN_MAX = IFLA_VXLAN_LOCAL6
|
||||
IFLA_VXLAN_UDP_CSUM
|
||||
IFLA_VXLAN_UDP_ZERO_CSUM6_TX
|
||||
IFLA_VXLAN_UDP_ZERO_CSUM6_RX
|
||||
IFLA_VXLAN_REMCSUM_TX
|
||||
IFLA_VXLAN_REMCSUM_RX
|
||||
IFLA_VXLAN_GBP
|
||||
IFLA_VXLAN_REMCSUM_NOPARTIAL
|
||||
IFLA_VXLAN_FLOWBASED
|
||||
IFLA_VXLAN_MAX = IFLA_VXLAN_FLOWBASED
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
@@ -39,8 +39,9 @@ func NativeEndian() binary.ByteOrder {
|
||||
var x uint32 = 0x01020304
|
||||
if *(*byte)(unsafe.Pointer(&x)) == 0x01 {
|
||||
nativeEndian = binary.BigEndian
|
||||
} else {
|
||||
nativeEndian = binary.LittleEndian
|
||||
}
|
||||
nativeEndian = binary.LittleEndian
|
||||
}
|
||||
return nativeEndian
|
||||
}
|
||||
|
||||
@@ -20,6 +20,15 @@ func NewRtMsg() *RtMsg {
|
||||
}
|
||||
}
|
||||
|
||||
func NewRtDelMsg() *RtMsg {
|
||||
return &RtMsg{
|
||||
RtMsg: syscall.RtMsg{
|
||||
Table: syscall.RT_TABLE_MAIN,
|
||||
Scope: syscall.RT_SCOPE_NOWHERE,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (msg *RtMsg) Len() int {
|
||||
return syscall.SizeofRtMsg
|
||||
}
|
||||
|
||||
@@ -14,22 +14,21 @@ import (
|
||||
// Equivalent to: `ip route add $route`
|
||||
func RouteAdd(route *Route) error {
|
||||
req := nl.NewNetlinkRequest(syscall.RTM_NEWROUTE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
|
||||
return routeHandle(route, req)
|
||||
return routeHandle(route, req, nl.NewRtMsg())
|
||||
}
|
||||
|
||||
// RouteAdd will delete a route from the system.
|
||||
// Equivalent to: `ip route del $route`
|
||||
func RouteDel(route *Route) error {
|
||||
req := nl.NewNetlinkRequest(syscall.RTM_DELROUTE, syscall.NLM_F_ACK)
|
||||
return routeHandle(route, req)
|
||||
return routeHandle(route, req, nl.NewRtDelMsg())
|
||||
}
|
||||
|
||||
func routeHandle(route *Route, req *nl.NetlinkRequest) error {
|
||||
func routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) error {
|
||||
if (route.Dst == nil || route.Dst.IP == nil) && route.Src == nil && route.Gw == nil {
|
||||
return fmt.Errorf("one of Dst.IP, Src, or Gw must not be nil")
|
||||
}
|
||||
|
||||
msg := nl.NewRtMsg()
|
||||
msg.Scope = uint8(route.Scope)
|
||||
family := -1
|
||||
var rtAttrs []*nl.RtAttr
|