Mirror of https://github.com/moby/moby.git (synced 2026-01-14 02:28:07 +00:00)

Compare commits: master...v1.8.0-rc2 (53 commits)
| SHA1 |
|---|
| 8a9d4eaa6e |
| 5851e2da60 |
| 9eff33735a |
| fc7697b050 |
| 1bf8954d0d |
| dfd9f5989a |
| d9581e861d |
| c383ceaf37 |
| 948912f692 |
| d19b1b927b |
| 53f5905379 |
| 60cbf4da6c |
| 183628388c |
| fbd2267e7d |
| a16ab243e5 |
| b3c3c4cddc |
| 0fe5aad984 |
| 0f5e2fd479 |
| 2f7145b1c5 |
| 81efe1f32e |
| 5ba75ac343 |
| 290987fcb4 |
| 98855c863d |
| b1f394a247 |
| a819a60a94 |
| 33cdc7f2c4 |
| 117860577c |
| b0ac5df367 |
| c109095a58 |
| d394113dfe |
| 2af7f63173 |
| f156fb7be5 |
| 559043b953 |
| ba8abcb3dd |
| ebf396c6e8 |
| 47d52fb872 |
| d167338876 |
| e6844381f0 |
| 589922adf0 |
| 689c4e6075 |
| 43da1adedb |
| 686fe02020 |
| 1d02be1c7a |
| edb60b950a |
| e0e852ee6f |
| b537508f8c |
| 37e886eb7b |
| 50f65742ef |
| 56d859d052 |
| 546a704c63 |
| fa85dc0030 |
| 36b6e5884d |
| 90991ddb9b |
CHANGELOG.md (86 changed lines)
@@ -1,5 +1,91 @@
# Changelog

## 1.8.0 (2015-07-24)

### Distribution

+ Trusted pull, push and build, disabled by default
* Make tar layers deterministic between registries
* Don't allow deleting the image of running containers
* Check if a tag name to load is a valid digest
* Allow one character repository names
* Add a more accurate error description for invalid tag name
* Make build cache ignore mtime

### Cli

+ Add support for DOCKER_CONFIG/--config to specify config file dir
+ Add --type flag for docker inspect command
+ Add formatting options to `docker ps` with `--format`
+ Replace `docker -d` with new subcommand `docker daemon`
* Zsh completion updates and improvements
* Add some missing events to bash completion
* Support daemon urls with base paths in `docker -H`
* Validate status= filter to docker ps
* Display when a container is in --net=host in docker ps
* Extend docker inspect to export image metadata related to graph driver
* Restore --default-gateway{,-v6} daemon options
* Add missing unpublished ports in docker ps
* Allow duration strings in `docker events` as --since/--until
* Expose more mounts information in `docker inspect`

### Runtime

+ Add new Fluentd logging driver
+ Allow `docker import` to load from local files
+ Add logging driver for GELF via UDP
+ Allow to copy files from host to containers with `docker cp`
+ Promote volume drivers from experimental to master
+ Add rollover log driver, and --log-driver-opts flag
+ Add memory swappiness tuning options
* Remove cgroup read-only flag when privileged
* Make /proc, /sys, & /dev readonly for readonly containers
* Add cgroup bind mount by default
* Overlay: Export metadata for container and image in `docker inspect`
* Devicemapper: external device activation
* Devicemapper: Compare uuid of base device on startup
* Remove RC4 from the list of registry cipher suites
* Add syslog-facility option
* LXC execdriver compatibility with recent LXC versions

### Plugins

* Separate plugin sockets and specs locations
* Allow TLS connections to plugins

### Bug fixes

- Add missing 'Names' field to /containers/json API output
- Make `docker rmi --dangling` safe when pulling
- Devicemapper: Change default basesize to 100G
- Go Scheduler issue with sync.Mutex and gcc
- Fix issue where Search API endpoint would panic due to empty AuthConfig
- Set image canonical names correctly
- Check dockerinit only if lxc driver is used
- Fix ulimit usage of nproc
- Always attach STDIN if -i,--interactive is specified
- Show error messages when saving container state fails
- Fixed incorrect assumption on --bridge=none treated as disable network
- Check for invalid port specifications in host configuration
- Fix endpoint leave failure for --net=host mode
- Fix goroutine leak in the stats API if the container is not running
- Check for apparmor file before reading it
- Fix DOCKER_TLS_VERIFY being ignored
- Set umask to the default on startup
- Correct the message of pause and unpause a non-running container
- Adjust disallowed CpuShares in container creation
- ZFS: correctly apply selinux context
- Display empty string instead of <nil> when IP opt is nil
- `docker kill` returns error when container is not running
- Fix COPY/ADD quoted/json form
- Fix goroutine leak on logs -f with no output
- Remove panic in nat package on invalid hostport
- Fix container linking in Fedora 22
- Fix error caused using default gateways outside of the allocated range
- Format times in inspect command with a template as RFC3339Nano
- Make registry client to accept 2xx and 3xx http status responses as successful

## 1.7.1 (2015-07-14)

#### Runtime
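Several of the Cli entries above (notably `docker ps --format`, and the RFC3339Nano time templating in `docker inspect` mentioned under bug fixes) are built on Go's text/template package. A minimal illustration of the mechanism, with made-up field names standing in for the real format context:

```go
package main

import (
	"os"
	"text/template"
)

// container stands in for the struct docker exposes to --format templates;
// the field names here are illustrative only, not the actual format context.
type container struct {
	ID    string
	Image string
}

func main() {
	// Equivalent in spirit to: docker ps --format "{{.ID}}: {{.Image}}"
	tmpl := template.Must(template.New("ps").Parse("{{.ID}}: {{.Image}}\n"))
	tmpl.Execute(os.Stdout, container{ID: "4c01db0b339c", Image: "ubuntu:15.04"})
}
```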
@@ -137,7 +137,7 @@ RUN set -x \
    && rm -rf "$GOPATH"

# Install notary server
-ENV NOTARY_COMMIT 77bced079e83d80f40c1f0a544b1a8a3b97fb052
+ENV NOTARY_COMMIT 8e8122eb5528f621afcd4e2854c47302f17392f7
RUN set -x \
    && export GOPATH="$(mktemp -d)" \
    && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
|
||||
@@ -13,7 +13,7 @@ databases, and backend services without depending on a particular stack
|
||||
or provider.
|
||||
|
||||
Docker began as an open-source implementation of the deployment engine which
|
||||
powers [dotCloud](https://dotcloud.com), a popular Platform-as-a-Service.
|
||||
powers [dotCloud](https://www.dotcloud.com), a popular Platform-as-a-Service.
|
||||
It benefits directly from the experience accumulated over several years
|
||||
of large-scale operation and support of hundreds of thousands of
|
||||
applications and databases.
|
||||
|
||||
@@ -115,8 +115,9 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
    }

    // Resolve the FROM lines in the Dockerfile to trusted digest references
-   // using Notary.
-   newDockerfile, err := rewriteDockerfileFrom(filepath.Join(contextDir, relDockerfile), cli.trustedReference)
+   // using Notary. On a successful build, we must tag the resolved digests
+   // to the original name specified in the Dockerfile.
+   newDockerfile, resolvedTags, err := rewriteDockerfileFrom(filepath.Join(contextDir, relDockerfile), cli.trustedReference)
    if err != nil {
        return fmt.Errorf("unable to process Dockerfile: %v", err)
    }

@@ -291,7 +292,20 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
        }
        return Cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
    }
-   return err
+
+   if err != nil {
+       return err
+   }
+
+   // Since the build was successful, now we must tag any of the resolved
+   // images from the above Dockerfile rewrite.
+   for _, resolved := range resolvedTags {
+       if err := cli.tagTrusted(resolved.repoInfo, resolved.digestRef, resolved.tagRef); err != nil {
+           return err
+       }
+   }
+
+   return nil
}

// getDockerfileRelPath uses the given context directory for a `docker build`

@@ -302,6 +316,22 @@ func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDi
        return "", "", fmt.Errorf("unable to get absolute context directory: %v", err)
    }

+   // The context dir might be a symbolic link, so follow it to the actual
+   // target directory.
+   absContextDir, err = filepath.EvalSymlinks(absContextDir)
+   if err != nil {
+       return "", "", fmt.Errorf("unable to evaluate symlinks in context path: %v", err)
+   }
+
+   stat, err := os.Lstat(absContextDir)
+   if err != nil {
+       return "", "", fmt.Errorf("unable to stat context directory %q: %v", absContextDir, err)
+   }
+
+   if !stat.IsDir() {
+       return "", "", fmt.Errorf("context must be a directory: %s", absContextDir)
+   }
+
    absDockerfile := givenDockerfile
    if absDockerfile == "" {
        // No -f/--file was specified so use the default relative to the

@@ -467,14 +497,21 @@ func (td *trustedDockerfile) Close() error {
    return os.Remove(td.File.Name())
}

+// resolvedTag records the repository, tag, and resolved digest reference
+// from a Dockerfile rewrite.
+type resolvedTag struct {
+   repoInfo          *registry.RepositoryInfo
+   digestRef, tagRef registry.Reference
+}
+
// rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in
// "FROM <image>" instructions to a digest reference. `translator` is a
// function that takes a repository name and tag reference and returns a
// trusted digest reference.
-func rewriteDockerfileFrom(dockerfileName string, translator func(string, registry.Reference) (registry.Reference, error)) (newDockerfile *trustedDockerfile, err error) {
+func rewriteDockerfileFrom(dockerfileName string, translator func(string, registry.Reference) (registry.Reference, error)) (newDockerfile *trustedDockerfile, resolvedTags []*resolvedTag, err error) {
    dockerfile, err := os.Open(dockerfileName)
    if err != nil {
-       return nil, fmt.Errorf("unable to open Dockerfile: %v", err)
+       return nil, nil, fmt.Errorf("unable to open Dockerfile: %v", err)
    }
    defer dockerfile.Close()

@@ -483,7 +520,7 @@ func rewriteDockerfileFrom(dockerfileName string, translator func(string, regist
    // Make a tempfile to store the rewritten Dockerfile.
    tempFile, err := ioutil.TempFile("", "trusted-dockerfile-")
    if err != nil {
-       return nil, fmt.Errorf("unable to make temporary trusted Dockerfile: %v", err)
+       return nil, nil, fmt.Errorf("unable to make temporary trusted Dockerfile: %v", err)
    }

    trustedFile := &trustedDockerfile{

@@ -509,21 +546,32 @@ func rewriteDockerfileFrom(dockerfileName string, translator func(string, regist
            if tag == "" {
                tag = tags.DEFAULTTAG
            }

+           repoInfo, err := registry.ParseRepositoryInfo(repo)
+           if err != nil {
+               return nil, nil, fmt.Errorf("unable to parse repository info: %v", err)
+           }
+
            ref := registry.ParseReference(tag)

            if !ref.HasDigest() && isTrusted() {
                trustedRef, err := translator(repo, ref)
                if err != nil {
-                   return nil, err
+                   return nil, nil, err
                }

                line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", trustedRef.ImageName(repo)))
+               resolvedTags = append(resolvedTags, &resolvedTag{
+                   repoInfo:  repoInfo,
+                   digestRef: trustedRef,
+                   tagRef:    ref,
+               })
            }
        }

        n, err := fmt.Fprintln(tempFile, line)
        if err != nil {
-           return nil, err
+           return nil, nil, err
        }

        trustedFile.size += int64(n)

@@ -531,7 +579,7 @@ func rewriteDockerfileFrom(dockerfileName string, translator func(string, regist

    tempFile.Seek(0, os.SEEK_SET)

-   return trustedFile, scanner.Err()
+   return trustedFile, resolvedTags, scanner.Err()
}

// replaceDockerfileTarWrapper wraps the given input tar archive stream and
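For orientation, here is a minimal, self-contained sketch of the FROM-line rewriting idea used above. The real code streams the Dockerfile through a scanner and resolves tags via Notary; the regexp and translator below are illustrative assumptions, not the exact ones in the tree (the real pattern is `dockerfileFromLinePattern`):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// fromLine matches instructions like "FROM golang:1.4" (illustrative).
var fromLine = regexp.MustCompile(`(?i)^[\s]*FROM[\s]+(.*)`)

// rewriteFrom replaces the image reference on each FROM line using the
// given translator (a stand-in for the Notary-backed trustedReference).
func rewriteFrom(dockerfile string, translate func(ref string) string) string {
	var out []string
	for _, line := range strings.Split(dockerfile, "\n") {
		if m := fromLine.FindStringSubmatch(line); m != nil {
			line = fromLine.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", translate(m[1])))
		}
		out = append(out, line)
	}
	return strings.Join(out, "\n")
}

func main() {
	df := "FROM golang:1.4\nRUN go build ./..."
	// A fake translator that pins a tag to a (made-up) digest.
	pin := func(ref string) string { return ref + "@sha256:deadbeef" }
	fmt.Println(rewriteFrom(df, pin))
}
```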
@@ -232,6 +232,20 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string) (er
    // Prepare destination copy info by stat-ing the container path.
    dstInfo := archive.CopyInfo{Path: dstPath}
    dstStat, err := cli.statContainerPath(dstContainer, dstPath)

+   // If the destination is a symbolic link, we should evaluate it.
+   if err == nil && dstStat.Mode&os.ModeSymlink != 0 {
+       linkTarget := dstStat.LinkTarget
+       if !filepath.IsAbs(linkTarget) {
+           // Join with the parent directory.
+           dstParent, _ := archive.SplitPathDirEntry(dstPath)
+           linkTarget = filepath.Join(dstParent, linkTarget)
+       }
+
+       dstInfo.Path = linkTarget
+       dstStat, err = cli.statContainerPath(dstContainer, linkTarget)
+   }
+
    // Ignore any error and assume that the parent directory of the destination
    // path exists, in which case the copy may still succeed. If there is any
    // type of conflict (e.g., non-directory overwriting an existing directory

@@ -242,15 +256,26 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string) (er
        dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir()
    }

-   var content io.Reader
+   var (
+       content         io.Reader
+       resolvedDstPath string
+   )
+
    if srcPath == "-" {
        // Use STDIN.
        content = os.Stdin
+       resolvedDstPath = dstInfo.Path
        if !dstInfo.IsDir {
            return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath))
        }
    } else {
-       srcArchive, err := archive.TarResource(srcPath)
+       // Prepare source copy info.
+       srcInfo, err := archive.CopyInfoSourcePath(srcPath)
+       if err != nil {
+           return err
+       }
+
+       srcArchive, err := archive.TarResource(srcInfo)
        if err != nil {
            return err
        }

@@ -262,12 +287,6 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string) (er
        // it to the specified directory in the container we get the disired
        // copy behavior.

-       // Prepare source copy info.
-       srcInfo, err := archive.CopyInfoStatPath(srcPath, true)
-       if err != nil {
-           return err
-       }
-
        // See comments in the implementation of `archive.PrepareArchiveCopy`
        // for exactly what goes into deciding how and whether the source
        // archive needs to be altered for the correct copy behavior when it is

@@ -280,12 +299,12 @@ func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string) (er
        }
        defer preparedArchive.Close()

-       dstPath = dstDir
+       resolvedDstPath = dstDir
        content = preparedArchive
    }

    query := make(url.Values, 2)
-   query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API.
+   query.Set("path", filepath.ToSlash(resolvedDstPath)) // Normalize the paths used in the API.
    // Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
    query.Set("noOverwriteDirNonDir", "true")
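The destination-symlink handling above rests on one small rule: a relative link target is resolved against the link's parent directory, while an absolute one is taken as-is. A standalone sketch of just that rule (`archive.SplitPathDirEntry` is Docker's helper; plain `path` functions stand in here):

```go
package main

import (
	"fmt"
	"path"
)

// resolveLinkTarget resolves a symlink found at linkPath whose target is
// target: absolute targets are kept, relative targets are joined with the
// link's parent directory, as in the diff above.
func resolveLinkTarget(linkPath, target string) string {
	if path.IsAbs(target) {
		return target
	}
	parent, _ := path.Split(linkPath) // stand-in for archive.SplitPathDirEntry
	return path.Join(parent, target)
}

func main() {
	fmt.Println(resolveLinkTarget("/data/current", "releases/v2")) // /data/releases/v2
	fmt.Println(resolveLinkTarget("/data/current", "/srv/v2"))     // /srv/v2
}
```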
@@ -13,6 +13,7 @@ import (
    "os"
    "path/filepath"
    "regexp"
+   "sort"
    "strconv"
    "strings"
    "time"

@@ -176,11 +177,16 @@ func convertTarget(t client.Target) (target, error) {
}

func (cli *DockerCli) getPassphraseRetriever() passphrase.Retriever {
-   baseRetriever := passphrase.PromptRetrieverWithInOut(cli.in, cli.out)
+   aliasMap := map[string]string{
+       "root":     "offline",
+       "snapshot": "tagging",
+       "targets":  "tagging",
+   }
+   baseRetriever := passphrase.PromptRetrieverWithInOut(cli.in, cli.out, aliasMap)
    env := map[string]string{
-       "root":     os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"),
-       "targets":  os.Getenv("DOCKER_CONTENT_TRUST_TARGET_PASSPHRASE"),
-       "snapshot": os.Getenv("DOCKER_CONTENT_TRUST_SNAPSHOT_PASSPHRASE"),
+       "root":     os.Getenv("DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE"),
+       "snapshot": os.Getenv("DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE"),
+       "targets":  os.Getenv("DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE"),
    }
    return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) {
        if v := env[alias]; v != "" {

@@ -311,6 +317,22 @@ func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registr
    return nil
}

+func selectKey(keys map[string]string) string {
+   if len(keys) == 0 {
+       return ""
+   }
+
+   keyIDs := []string{}
+   for k := range keys {
+       keyIDs = append(keyIDs, k)
+   }
+
+   // TODO(dmcgowan): let user choose if multiple keys, now pick consistently
+   sort.Strings(keyIDs)
+
+   return keyIDs[0]
+}
+
func targetStream(in io.Writer) (io.WriteCloser, <-chan []target) {
    r, w := io.Pipe()
    out := io.MultiWriter(in, w)

@@ -409,16 +431,13 @@ func (cli *DockerCli) trustedPush(repoInfo *registry.RepositoryInfo, tag string,

    ks := repo.KeyStoreManager
    keys := ks.RootKeyStore().ListKeys()
-   var rootKey string
-
-   if len(keys) == 0 {
+   rootKey := selectKey(keys)
+   if rootKey == "" {
        rootKey, err = ks.GenRootKey("ecdsa")
        if err != nil {
            return err
        }
-   } else {
-       // TODO(dmcgowan): let user choose
-       rootKey = keys[0]
    }

    cryptoService, err := ks.GetRootCryptoService(rootKey)
@@ -298,7 +298,13 @@ func (s *Server) postContainersKill(version version.Version, w http.ResponseWrit
    }

    if err := s.daemon.ContainerKill(name, sig); err != nil {
-       return err
+       _, isStopped := err.(daemon.ErrContainerNotRunning)
+       // Return error that's not caused because the container is stopped.
+       // Return error if the container is not running and the api is >= 1.20
+       // to keep backwards compatibility.
+       if version.GreaterThanOrEqualTo("1.20") || !isStopped {
+           return fmt.Errorf("Cannot kill container %s: %v", name, err)
+       }
    }

    w.WriteHeader(http.StatusNoContent)
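The `ErrContainerNotRunning` check above is the standard Go typed-error pattern: return a concrete error type and let callers branch on it with a type assertion. A minimal, self-contained sketch of the same shape (names here are illustrative):

```go
package main

import "fmt"

// ErrNotRunning mirrors daemon.ErrContainerNotRunning: a concrete error
// type that callers can detect with a type assertion.
type ErrNotRunning struct{ id string }

func (e ErrNotRunning) Error() string {
	return fmt.Sprintf("Container %s is not running", e.id)
}

func kill(id string, running bool) error {
	if !running {
		return ErrNotRunning{id}
	}
	return nil
}

func main() {
	err := kill("abc123", false)
	// Branch on the error's concrete type, as the API handler does.
	if _, isStopped := err.(ErrNotRunning); isStopped {
		fmt.Println("already stopped; older API versions treat this as success")
	}
}
```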
@@ -109,7 +109,7 @@ func allocateDaemonPort(addr string) error {

func adjustCpuShares(version version.Version, hostConfig *runconfig.HostConfig) {
    if version.LessThan("1.19") {
-       if hostConfig.CpuShares > 0 {
+       if hostConfig != nil && hostConfig.CpuShares > 0 {
            // Handle unsupported CpuShares
            if hostConfig.CpuShares < linuxMinCpuShares {
                logrus.Warnf("Changing requested CpuShares of %d to minimum allowed of %d", hostConfig.CpuShares, linuxMinCpuShares)
@@ -86,7 +86,7 @@ type ImageInspect struct {
    Id              string
    Parent          string
    Comment         string
-   Created         time.Time
+   Created         string
    Container       string
    ContainerConfig *runconfig.Config
    DockerVersion   string

@@ -130,14 +130,13 @@ type CopyConfig struct {

// ContainerPathStat is used to encode the header from
// GET /containers/{name:.*}/archive
-// "name" is the file or directory name.
-// "path" is the absolute path to the resource in the container.
+// "name" is basename of the resource.
type ContainerPathStat struct {
-   Name  string      `json:"name"`
-   Path  string      `json:"path"`
-   Size  int64       `json:"size"`
-   Mode  os.FileMode `json:"mode"`
-   Mtime time.Time   `json:"mtime"`
+   Name       string      `json:"name"`
+   Size       int64       `json:"size"`
+   Mode       os.FileMode `json:"mode"`
+   Mtime      time.Time   `json:"mtime"`
+   LinkTarget string      `json:"linkTarget"`
}

// GET "/containers/{name:.*}/top"

@@ -215,14 +214,14 @@ type ContainerState struct {
    Pid        int
    ExitCode   int
    Error      string
-   StartedAt  time.Time
-   FinishedAt time.Time
+   StartedAt  string
+   FinishedAt string
}

// GET "/containers/{name:.*}/json"
type ContainerJSONBase struct {
    Id      string
-   Created time.Time
+   Created string
    Path    string
    Args    []string
    State   *ContainerState
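Changing `Created`/`StartedAt`/`FinishedAt` from `time.Time` to `string` pins the wire format: the daemon serializes timestamps explicitly (the inspect diff further down formats them with `time.RFC3339Nano`) instead of relying on encoding/json's default `time.Time` encoding. A quick illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type state struct {
	// A string field makes the wire format explicit and stable.
	StartedAt string
}

func main() {
	t := time.Date(2015, 7, 24, 12, 0, 0, 123456789, time.UTC)
	s := state{StartedAt: t.Format(time.RFC3339Nano)}
	b, _ := json.Marshal(s)
	fmt.Println(string(b)) // {"StartedAt":"2015-07-24T12:00:00.123456789Z"}
}
```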
@@ -1,25 +0,0 @@
-#include <tunables/global>
-
-profile docker-default flags=(attach_disconnected,mediate_deleted) {
-  #include <abstractions/base>
-
-  network,
-  capability,
-  file,
-  umount,
-
-  deny @{PROC}/sys/fs/** wklx,
-  deny @{PROC}/sysrq-trigger rwklx,
-  deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx,
-  deny @{PROC}/sys/kernel/*/** wklx,
-
-  deny mount,
-
-  deny /sys/[^f]*/** wklx,
-  deny /sys/f[^s]*/** wklx,
-  deny /sys/fs/[^c]*/** wklx,
-  deny /sys/fs/c[^g]*/** wklx,
-  deny /sys/fs/cg[^r]*/** wklx,
-  deny /sys/firmware/efi/efivars/** rwklx,
-  deny /sys/kernel/security/** rwklx,
-}
@@ -1,6 +1,6 @@
@{DOCKER_GRAPH_PATH}=/var/lib/docker

-profile /usr/bin/docker (attach_disconnected) {
+profile /usr/bin/docker (attach_disconnected, complain) {
  # Prevent following links to these files during container setup.
  deny /etc/** mkl,
  deny /dev/** kl,

@@ -21,51 +21,131 @@ profile /usr/bin/docker (attach_disconnected) {
  ipc rw,
  network,
  capability,
  file,
  owner /** rw,
  /var/lib/docker/** rwl,

  # For non-root client use:
  /dev/urandom r,
  /run/docker.sock rw,
  /proc/** r,
  /sys/kernel/mm/hugepages/ r,
  /etc/localtime r,

-  ptrace peer=@{profile_name},
+  ptrace (read) peer=docker-default,
+  deny ptrace (trace) peer=docker-default,
+  deny ptrace peer=/usr/bin/docker///bin/ps,

  /usr/bin/docker pix,
-  /sbin/xtables-multi rCix,
+  /sbin/xtables-multi rCx,
+  /sbin/iptables rCx,
  /sbin/modprobe rCx,
  /sbin/auplink rCx,
+  /bin/kmod rCx,
  /usr/bin/xz rCx,
+  /bin/ps rCx,
+  /bin/cat rCx,
+  /sbin/zfs rCx,

+  # Transitions
+  change_profile -> docker-*,
+  change_profile -> unconfined,

-  profile /sbin/iptables {
-    signal (receive) peer=/usr/bin/docker,
-    capability net_admin,
-  }
-  profile /sbin/auplink flags=(attach_disconnected) {
-    signal (receive) peer=/usr/bin/docker,
-    capability sys_admin,
-    capability dac_override,
+  profile /bin/cat (complain) {
+    /etc/ld.so.cache r,
+    /lib/** r,
+    /dev/null rw,
+    /proc r,
+    /bin/cat mr,

-    @{DOCKER_GRAPH_PATH}/aufs/** rw,
-    # For user namespaces:
-    @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/** rw,
-
-    # The following may be removed via delegates
-    /sys/fs/aufs/** r,
-    /lib/** r,
-    /apparmor/.null r,
-    /dev/null rw,
-    /etc/ld.so.cache r,
-    /sbin/auplink rm,
-    /proc/fs/aufs/** rw,
-    /proc/[0-9]*/mounts rw,
+    # For reading in 'docker stats':
+    /proc/[0-9]*/net/dev r,
  }
-  profile /sbin/modprobe {
-    signal (receive) peer=/usr/bin/docker,
-    capability sys_module,
-    file,
+  profile /bin/ps (complain) {
+    /etc/ld.so.cache r,
+    /etc/localtime r,
+    /etc/passwd r,
+    /etc/nsswitch.conf r,
+    /lib/** r,
+    /proc/[0-9]*/** r,
+    /dev/null rw,
+    /bin/ps mr,
+
+    # We don't need ptrace so we'll deny and ignore the error.
+    deny ptrace (read, trace),
+
+    # Quiet dac_override denials
+    deny capability dac_override,
+    deny capability dac_read_search,
+    deny capability sys_ptrace,
+
+    /dev/tty r,
+    /proc/stat r,
+    /proc/cpuinfo r,
+    /proc/meminfo r,
+    /proc/uptime r,
+    /sys/devices/system/cpu/online r,
+    /proc/sys/kernel/pid_max r,
+    /proc/ r,
+    /proc/tty/drivers r,
  }
+  profile /sbin/iptables (complain) {
+    signal (receive) peer=/usr/bin/docker,
+    capability net_admin,
+  }
+  profile /sbin/auplink flags=(attach_disconnected, complain) {
+    signal (receive) peer=/usr/bin/docker,
+    capability sys_admin,
+    capability dac_override,
+
+    @{DOCKER_GRAPH_PATH}/aufs/** rw,
+    @{DOCKER_GRAPH_PATH}/tmp/** rw,
+    # For user namespaces:
+    @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/** rw,
+
+    /sys/fs/aufs/** r,
+    /lib/** r,
+    /apparmor/.null r,
+    /dev/null rw,
+    /etc/ld.so.cache r,
+    /sbin/auplink rm,
+    /proc/fs/aufs/** rw,
+    /proc/[0-9]*/mounts rw,
+  }
+  profile /sbin/modprobe /bin/kmod (complain) {
+    signal (receive) peer=/usr/bin/docker,
+    capability sys_module,
+    /etc/ld.so.cache r,
+    /lib/** r,
+    /dev/null rw,
+    /apparmor/.null rw,
+    /sbin/modprobe rm,
+    /bin/kmod rm,
+    /proc/cmdline r,
+    /sys/module/** r,
+    /etc/modprobe.d{/,/**} r,
+  }
  # xz works via pipes, so we do not need access to the filesystem.
-  profile /usr/bin/xz {
-    signal (receive) peer=/usr/bin/docker,
+  profile /usr/bin/xz (complain) {
+    signal (receive) peer=/usr/bin/docker,
+    /etc/ld.so.cache r,
+    /lib/** r,
+    /usr/bin/xz rm,
+    deny /proc/** rw,
+    deny /sys/** rw,
  }
+  profile /sbin/xtables-multi (attach_disconnected, complain) {
+    /etc/ld.so.cache r,
+    /lib/** r,
+    /sbin/xtables-multi rm,
+    /apparmor/.null w,
+    /dev/null rw,
+    capability net_raw,
+    capability net_admin,
+    network raw,
+  }
+  profile /sbin/zfs (attach_disconnected, complain) {
+    file,
+    capability,
+  }
}
@@ -295,6 +295,10 @@ __docker_complete_log_driver_options() {
    return 1
}

+__docker_log_levels() {
+   COMPREPLY=( $( compgen -W "debug info warn error fatal" -- "$cur" ) )
+}
+
# a selection of the available signals that is most likely of interest in the
# context of docker containers.
__docker_signals() {

@@ -312,49 +316,20 @@ __docker_signals() {
    COMPREPLY=( $( compgen -W "${signals[*]} ${signals[*]#SIG}" -- "$( echo $cur | tr '[:lower:]' '[:upper:]')" ) )
}

+# global options that may appear after the docker command
_docker_docker() {
    local boolean_options="
-       --daemon -d
-       --debug -D
+       $global_boolean_options
        --help -h
-       --icc
-       --ip-forward
-       --ip-masq
-       --iptables
-       --ipv6
-       --selinux-enabled
-       --tls
-       --tlsverify
-       --userland-proxy=false
        --version -v
    "

    case "$prev" in
-       --exec-root|--graph|-g)
-           _filedir -d
-           return
-           ;;
-       --log-driver)
-           __docker_log_drivers
-           return
-           ;;
        --log-level|-l)
-           COMPREPLY=( $( compgen -W "debug info warn error fatal" -- "$cur" ) )
+           __docker_log_levels
            return
            ;;
-       --log-opt)
-           __docker_log_driver_options
-           return
-           ;;
        --pidfile|-p|--tlscacert|--tlscert|--tlskey)
            _filedir
            return
            ;;
-       --storage-driver|-s)
-           COMPREPLY=( $( compgen -W "aufs devicemapper btrfs overlay" -- "$(echo $cur | tr '[:upper:]' '[:lower:]')" ) )
-           return
-           ;;
-       $main_options_with_args_glob )
+       $(__docker_to_extglob "$global_options_with_args") )
            return
            ;;
    esac

@@ -363,10 +338,10 @@ _docker_docker() {

    case "$cur" in
        -*)
-           COMPREPLY=( $( compgen -W "$boolean_options $main_options_with_args" -- "$cur" ) )
+           COMPREPLY=( $( compgen -W "$boolean_options $global_options_with_args" -- "$cur" ) )
            ;;
        *)
-           local counter="$(__docker_pos_first_nonflag $main_options_with_args_glob)"
+           local counter=$( __docker_pos_first_nonflag $(__docker_to_extglob "$global_options_with_args") )
            if [ $cword -eq $counter ]; then
                COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) )
            fi

@@ -478,6 +453,84 @@ _docker_create() {
    _docker_run
}

+_docker_daemon() {
+   local boolean_options="
+       $global_boolean_options
+       --help -h
+       --icc=false
+       --ip-forward=false
+       --ip-masq=false
+       --iptables=false
+       --ipv6
+       --selinux-enabled
+       --userland-proxy=false
+   "
+   local options_with_args="
+       $global_options_with_args
+       --api-cors-header
+       --bip
+       --bridge -b
+       --default-gateway
+       --default-gateway-v6
+       --default-ulimit
+       --dns
+       --dns-search
+       --exec-driver -e
+       --exec-opt
+       --exec-root
+       --fixed-cidr
+       --fixed-cidr-v6
+       --graph -g
+       --group -G
+       --insecure-registry
+       --ip
+       --label
+       --log-driver
+       --log-opt
+       --mtu
+       --pidfile -p
+       --registry-mirror
+       --storage-driver -s
+       --storage-opt
+   "
+
+   case "$prev" in
+       --exec-root|--graph|-g)
+           _filedir -d
+           return
+           ;;
+       --log-driver)
+           __docker_log_drivers
+           return
+           ;;
+       --pidfile|-p|--tlscacert|--tlscert|--tlskey)
+           _filedir
+           return
+           ;;
+       --storage-driver|-s)
+           COMPREPLY=( $( compgen -W "aufs devicemapper btrfs overlay" -- "$(echo $cur | tr '[:upper:]' '[:lower:]')" ) )
+           return
+           ;;
+       --log-level|-l)
+           __docker_log_levels
+           return
+           ;;
+       --log-opt)
+           __docker_log_driver_options
+           return
+           ;;
+       $(__docker_to_extglob "$options_with_args") )
+           return
+           ;;
+   esac
+
+   case "$cur" in
+       -*)
+           COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
+           ;;
+   esac
+}
+
_docker_diff() {
    case "$cur" in
        -*)

@@ -685,8 +738,17 @@ _docker_inspect() {
            COMPREPLY=( $( compgen -W "--format -f --type --help" -- "$cur" ) )
            ;;
        *)
-           __docker_containers_and_images
-           ;;
+           case $(__docker_value_of_option --type) in
+               '')
+                   __docker_containers_and_images
+                   ;;
+               container)
+                   __docker_containers_all
+                   ;;
+               image)
+                   __docker_image_repos_and_tags_and_ids
+                   ;;
+           esac
    esac
}

@@ -1287,6 +1349,7 @@ _docker() {
        commit
        cp
        create
+       daemon
        diff
        events
        exec

@@ -1323,40 +1386,21 @@ _docker() {
        wait
    )

-   local main_options_with_args="
-       --api-cors-header
-       --bip
-       --bridge -b
-       --default-gateway
-       --default-gateway-v6
-       --default-ulimit
-       --dns
-       --dns-search
-       --exec-driver -e
-       --exec-opt
-       --exec-root
-       --fixed-cidr
-       --fixed-cidr-v6
-       --graph -g
-       --group -G
-       --host -H
-       --insecure-registry
-       --ip
-       --label
-       --log-driver
-       --log-level -l
-       --log-opt
-       --mtu
-       --pidfile -p
-       --registry-mirror
-       --storage-driver -s
-       --storage-opt
-       --tlscacert
-       --tlscert
-       --tlskey
-   "
-
-   local main_options_with_args_glob=$(__docker_to_extglob "$main_options_with_args")
+   # These options are valid as global options for all client commands
+   # and valid as command options for `docker daemon`
+   local global_boolean_options="
+       --debug -D
+       --tls
+       --tlsverify
+   "
+   local global_options_with_args="
+       --host -H
+       --log-level -l
+       --tlscacert
+       --tlscert
+       --tlskey
+   "
+
    local host

    COMPREPLY=()

@@ -1372,7 +1416,7 @@ _docker() {
                (( counter++ ))
                host="${words[$counter]}"
                ;;
-           $main_options_with_args_glob )
+           $(__docker_to_extglob "$global_options_with_args") )
                (( counter++ ))
                ;;
            -*)
@@ -70,6 +70,66 @@ func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNon
    return container.ExtractToDir(path, noOverwriteDirNonDir, content)
}

+// resolvePath resolves the given path in the container to a resource on the
+// host. Returns a resolved path (absolute path to the resource on the host),
+// the absolute path to the resource relative to the container's rootfs, and
+// a error if the path points to outside the container's rootfs.
+func (container *Container) resolvePath(path string) (resolvedPath, absPath string, err error) {
+   // Consider the given path as an absolute path in the container.
+   absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)
+
+   // Split the absPath into its Directory and Base components. We will
+   // resolve the dir in the scope of the container then append the base.
+   dirPath, basePath := filepath.Split(absPath)
+
+   resolvedDirPath, err := container.GetResourcePath(dirPath)
+   if err != nil {
+       return "", "", err
+   }
+
+   // resolvedDirPath will have been cleaned (no trailing path separators) so
+   // we can manually join it with the base path element.
+   resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+
+   return resolvedPath, absPath, nil
+}
+
+// statPath is the unexported version of StatPath. Locks and mounts should
+// be aquired before calling this method and the given path should be fully
+// resolved to a path on the host corresponding to the given absolute path
+// inside the container.
+func (container *Container) statPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) {
+   lstat, err := os.Lstat(resolvedPath)
+   if err != nil {
+       return nil, err
+   }
+
+   var linkTarget string
+   if lstat.Mode()&os.ModeSymlink != 0 {
+       // Fully evaluate the symlink in the scope of the container rootfs.
+       hostPath, err := container.GetResourcePath(absPath)
+       if err != nil {
+           return nil, err
+       }
+
+       linkTarget, err = filepath.Rel(container.basefs, hostPath)
+       if err != nil {
+           return nil, err
+       }
+
+       // Make it an absolute path.
+       linkTarget = filepath.Join(string(filepath.Separator), linkTarget)
+   }
+
+   return &types.ContainerPathStat{
+       Name:       filepath.Base(absPath),
+       Size:       lstat.Size(),
+       Mode:       lstat.Mode(),
+       Mtime:      lstat.ModTime(),
+       LinkTarget: linkTarget,
+   }, nil
+}
+
// StatPath stats the filesystem resource at the specified path in this
// container. Returns stat info about the resource.
func (container *Container) StatPath(path string) (stat *types.ContainerPathStat, err error) {

@@ -87,39 +147,12 @@ func (container *Container) StatPath(path string) (stat *types.ContainerPathStat
        return nil, err
    }

-   // Consider the given path as an absolute path in the container.
-   absPath := path
-   if !filepath.IsAbs(absPath) {
-       absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join("/", path), path)
-   }
-
-   resolvedPath, err := container.GetResourcePath(absPath)
+   resolvedPath, absPath, err := container.resolvePath(path)
    if err != nil {
        return nil, err
    }

-   // A trailing "." or separator has important meaning. For example, if
-   // `"foo"` is a symlink to some directory `"dir"`, then `os.Lstat("foo")`
-   // will stat the link itself, while `os.Lstat("foo/")` will stat the link
-   // target. If the basename of the path is ".", it means to archive the
-   // contents of the directory with "." as the first path component rather
-   // than the name of the directory. This would cause extraction of the
-   // archive to *not* make another directory, but instead use the current
-   // directory.
-   resolvedPath = archive.PreserveTrailingDotOrSeparator(resolvedPath, absPath)
-
-   lstat, err := os.Lstat(resolvedPath)
-   if err != nil {
-       return nil, err
-   }
-
-   return &types.ContainerPathStat{
-       Name:  lstat.Name(),
-       Path:  absPath,
-       Size:  lstat.Size(),
-       Mode:  lstat.Mode(),
-       Mtime: lstat.ModTime(),
-   }, nil
+   return container.statPath(resolvedPath, absPath)
}

// ArchivePath creates an archive of the filesystem resource at the specified

@@ -154,41 +187,25 @@ func (container *Container) ArchivePath(path string) (content io.ReadCloser, sta
        return nil, nil, err
    }

-   // Consider the given path as an absolute path in the container.
-   absPath := path
-   if !filepath.IsAbs(absPath) {
-       absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join("/", path), path)
-   }
-
-   resolvedPath, err := container.GetResourcePath(absPath)
+   resolvedPath, absPath, err := container.resolvePath(path)
    if err != nil {
        return nil, nil, err
    }

-   // A trailing "." or separator has important meaning. For example, if
-   // `"foo"` is a symlink to some directory `"dir"`, then `os.Lstat("foo")`
-   // will stat the link itself, while `os.Lstat("foo/")` will stat the link
-   // target. If the basename of the path is ".", it means to archive the
-   // contents of the directory with "." as the first path component rather
-   // than the name of the directory. This would cause extraction of the
-   // archive to *not* make another directory, but instead use the current
-   // directory.
-   resolvedPath = archive.PreserveTrailingDotOrSeparator(resolvedPath, absPath)
-
-   lstat, err := os.Lstat(resolvedPath)
+   stat, err = container.statPath(resolvedPath, absPath)
    if err != nil {
        return nil, nil, err
    }

-   stat = &types.ContainerPathStat{
-       Name:  lstat.Name(),
-       Path:  absPath,
-       Size:  lstat.Size(),
-       Mode:  lstat.Mode(),
-       Mtime: lstat.ModTime(),
-   }
-
-   data, err := archive.TarResource(resolvedPath)
+   // We need to rebase the archive entries if the last element of the
+   // resolved path was a symlink that was evaluated and is now different
+   // than the requested path. For example, if the given path was "/foo/bar/",
+   // but it resolved to "/var/lib/docker/containers/{id}/foo/baz/", we want
+   // to ensure that the archive entries start with "bar" and not "baz". This
+   // also catches the case when the root directory of the container is
+   // requested: we want the archive entries to start with "/" and not the
+   // container ID.
+   data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath))
    if err != nil {
        return nil, nil, err
    }

@@ -227,27 +244,21 @@ func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool,
        return err
    }

-   // Consider the given path as an absolute path in the container.
-   absPath := path
-   if !filepath.IsAbs(absPath) {
-       absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join("/", path), path)
-   }
+   // The destination path needs to be resolved to a host path, with all
+   // symbolic links followed in the scope of the container's rootfs. Note
+   // that we do not use `container.resolvePath(path)` here because we need
+   // to also evaluate the last path element if it is a symlink. This is so
+   // that you can extract an archive to a symlink that points to a directory.
+
+   // Consider the given path as an absolute path in the container.
+   absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)

+   // This will evaluate the last path element if it is a symlink.
    resolvedPath, err := container.GetResourcePath(absPath)
    if err != nil {
        return err
    }

-   // A trailing "." or separator has important meaning. For example, if
-   // `"foo"` is a symlink to some directory `"dir"`, then `os.Lstat("foo")`
-   // will stat the link itself, while `os.Lstat("foo/")` will stat the link
-   // target. If the basename of the path is ".", it means to archive the
-   // contents of the directory with "." as the first path component rather
-   // than the name of the directory. This would cause extraction of the
-   // archive to *not* make another directory, but instead use the current
-   // directory.
-   resolvedPath = archive.PreserveTrailingDotOrSeparator(resolvedPath, absPath)
-
    stat, err := os.Lstat(resolvedPath)
    if err != nil {
        return err

@@ -257,23 +268,23 @@ func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool,
        return ErrExtractPointNotDirectory
    }

-   // Need to check if the path is in a volume. If it is, it cannot be in a
-   // read-only volume. If it is not in a volume, the container cannot be
-   // configured with a read-only rootfs.
-
+   // Use the resolved path relative to the container rootfs as the new
+   // absPath. This way we fully follow any symlinks in a volume that may
+   // lead back outside the volume.
    baseRel, err := filepath.Rel(container.basefs, resolvedPath)
    if err != nil {
        return err
    }
-   absPath = filepath.Join("/", baseRel)
+   // Make it an absolute path.
+   absPath = filepath.Join(string(filepath.Separator), baseRel)

-   var toVolume bool
-   for _, mnt := range container.MountPoints {
-       if toVolume = mnt.hasResource(absPath); toVolume {
-           if mnt.RW {
-               break
-           }
-           return ErrVolumeReadonly
-       }
+   // Need to check if the path is in a volume. If it is, it cannot be in a
+   // read-only volume. If it is not in a volume, the container cannot be
+   // configured with a read-only rootfs.
+   toVolume, err := checkIfPathIsInAVolume(container, absPath)
+   if err != nil {
+       return err
    }

    if !toVolume && container.hostConfig.ReadonlyRootfs {

@@ -295,3 +306,19 @@ func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool,

    return nil
}
+
+// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it
+// cannot be in a read-only volume. If it is not in a volume, the container
+// cannot be configured with a read-only rootfs.
+func checkIfPathIsInAVolume(container *Container, absPath string) (bool, error) {
+   var toVolume bool
+   for _, mnt := range container.MountPoints {
+       if toVolume = mnt.hasResource(absPath); toVolume {
+           if mnt.RW {
+               break
+           }
+           return false, ErrVolumeReadonly
+       }
+   }
+   return toVolume, nil
+}
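`resolvePath` deliberately splits the path and resolves only the directory part, so that a symlink in the final element is examined with Lstat rather than followed. A minimal sketch of that split-and-rejoin shape, with `GetResourcePath` (which scopes resolution to the container rootfs) stubbed out as an assumption:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// resolveScoped mimics the shape of container.resolvePath: resolve the
// directory, then re-append the base unresolved so a trailing symlink is
// later stat'd as a link, not followed.
func resolveScoped(rootfs, p string) string {
	abs := filepath.Join(string(filepath.Separator), p)
	dir, base := filepath.Split(abs)
	// Stand-in for GetResourcePath: a real implementation must also
	// evaluate symlinks *inside* rootfs so they cannot escape it.
	resolvedDir := filepath.Join(rootfs, dir)
	return resolvedDir + string(filepath.Separator) + base
}

func main() {
	fmt.Println(resolveScoped("/var/lib/docker/containers/abc/rootfs", "etc/hostname"))
	// /var/lib/docker/containers/abc/rootfs/etc/hostname
}
```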
@@ -41,6 +41,14 @@ var (
    ErrContainerRootfsReadonly = errors.New("container rootfs is marked read-only")
)

+type ErrContainerNotRunning struct {
+   id string
+}
+
+func (e ErrContainerNotRunning) Error() string {
+   return fmt.Sprintf("Container %s is not running", e.id)
+}
+
type StreamConfig struct {
    stdout *broadcastwriter.BroadcastWriter
    stderr *broadcastwriter.BroadcastWriter

@@ -371,7 +379,7 @@ func (container *Container) KillSig(sig int) error {
    }

    if !container.Running {
-       return fmt.Errorf("Container %s is not running", container.ID)
+       return ErrContainerNotRunning{container.ID}
    }

    // signal to the monitor that it should not restart the container

@@ -408,7 +416,7 @@ func (container *Container) Pause() error {

    // We cannot Pause the container which is not running
    if !container.Running {
-       return fmt.Errorf("Container %s is not running, cannot pause a non-running container", container.ID)
+       return ErrContainerNotRunning{container.ID}
    }

    // We cannot Pause the container which is already paused

@@ -430,7 +438,7 @@ func (container *Container) Unpause() error {

    // We cannot unpause the container which is not running
    if !container.Running {
-       return fmt.Errorf("Container %s is not running, cannot unpause a non-running container", container.ID)
+       return ErrContainerNotRunning{container.ID}
    }

    // We cannot unpause the container which is not paused

@@ -448,7 +456,7 @@ func (container *Container) Unpause() error {

func (container *Container) Kill() error {
    if !container.IsRunning() {
-       return fmt.Errorf("Container %s is not running", container.ID)
+       return ErrContainerNotRunning{container.ID}
    }

    // 1. Send SIGKILL

@@ -530,7 +538,7 @@ func (container *Container) Restart(seconds int) error {

func (container *Container) Resize(h, w int) error {
    if !container.IsRunning() {
-       return fmt.Errorf("Cannot resize container %s, container is not running", container.ID)
+       return ErrContainerNotRunning{container.ID}
    }
    if err := container.command.ProcessConfig.Terminal.Resize(h, w); err != nil {
        return err

@@ -1080,8 +1088,12 @@ func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error)

func (container *Container) networkMounts() []execdriver.Mount {
    var mounts []execdriver.Mount
+   mode := "Z"
+   if container.hostConfig.NetworkMode.IsContainer() {
+       mode = "z"
+   }
    if container.ResolvConfPath != "" {
-       label.SetFileLabel(container.ResolvConfPath, container.MountLabel)
+       label.Relabel(container.ResolvConfPath, container.MountLabel, mode)
        mounts = append(mounts, execdriver.Mount{
            Source:      container.ResolvConfPath,
            Destination: "/etc/resolv.conf",

@@ -1090,7 +1102,7 @@ func (container *Container) networkMounts() []execdriver.Mount {
        })
    }
    if container.HostnamePath != "" {
-       label.SetFileLabel(container.HostnamePath, container.MountLabel)
+       label.Relabel(container.HostnamePath, container.MountLabel, mode)
        mounts = append(mounts, execdriver.Mount{
            Source:      container.HostnamePath,
            Destination: "/etc/hostname",

@@ -1099,7 +1111,7 @@ func (container *Container) networkMounts() []execdriver.Mount {
        })
    }
    if container.HostsPath != "" {
-       label.SetFileLabel(container.HostsPath, container.MountLabel)
+       label.Relabel(container.HostsPath, container.MountLabel, mode)
        mounts = append(mounts, execdriver.Mount{
            Source:      container.HostsPath,
            Destination: "/etc/hosts",
@@ -272,7 +272,11 @@ func populateCommand(c *Container, env []string) error {
        BlkioWeight:      c.hostConfig.BlkioWeight,
        Rlimits:          rlimits,
        OomKillDisable:   c.hostConfig.OomKillDisable,
-       MemorySwappiness: c.hostConfig.MemorySwappiness,
+       MemorySwappiness: -1,
    }

+   if c.hostConfig.MemorySwappiness != nil {
+       resources.MemorySwappiness = *c.hostConfig.MemorySwappiness
+   }
+
    processConfig := execdriver.ProcessConfig{
@@ -167,13 +167,16 @@ func (daemon *Daemon) verifyContainerSettings(hostConfig *runconfig.HostConfig,
    if hostConfig.Memory == 0 && hostConfig.MemorySwap > 0 {
        return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.")
    }
-   if hostConfig.MemorySwappiness != -1 && !daemon.SystemConfig().MemorySwappiness {
+   if hostConfig.MemorySwappiness != nil && !daemon.SystemConfig().MemorySwappiness {
        warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.")
        logrus.Warnf("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.")
-       hostConfig.MemorySwappiness = -1
+       hostConfig.MemorySwappiness = nil
    }
-   if hostConfig.MemorySwappiness != -1 && (hostConfig.MemorySwappiness < 0 || hostConfig.MemorySwappiness > 100) {
-       return warnings, fmt.Errorf("Invalid value: %d, valid memory swappiness range is 0-100.", hostConfig.MemorySwappiness)
+   if hostConfig.MemorySwappiness != nil {
+       swappiness := *hostConfig.MemorySwappiness
+       if swappiness < -1 || swappiness > 100 {
+           return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100.", swappiness)
+       }
    }
    if hostConfig.CpuPeriod > 0 && !daemon.SystemConfig().CpuCfsPeriod {
        warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.")
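The switch from a plain integer to a pointer for MemorySwappiness is the usual Go trick for an optional numeric field: nil means "not set" and is distinguishable from an explicit 0. A minimal sketch (field and type names are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type hostConfig struct {
	// nil = unset; an explicit 0 survives the round trip, unlike a
	// plain int64 where 0 and "absent" are indistinguishable.
	MemorySwappiness *int64 `json:"MemorySwappiness,omitempty"`
}

func main() {
	var unset hostConfig
	zero := int64(0)
	set := hostConfig{MemorySwappiness: &zero}

	a, _ := json.Marshal(unset)
	b, _ := json.Marshal(set)
	fmt.Println(string(a)) // {}
	fmt.Println(string(b)) // {"MemorySwappiness":0}
}
```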
@@ -6,6 +6,7 @@ import (
    "fmt"
    "path"

+   "github.com/Sirupsen/logrus"
    "github.com/docker/docker/daemon/execdriver"
    "github.com/docker/docker/daemon/execdriver/lxc"
    "github.com/docker/docker/daemon/execdriver/native"

@@ -18,6 +19,7 @@ func NewDriver(name string, options []string, root, libPath, initPath string, sy
        // we want to give the lxc driver the full docker root because it needs
        // to access and write config and template files in /var/lib/docker/containers/*
        // to be backwards compatible
+       logrus.Warn("LXC built-in support is deprecated.")
        return lxc.NewDriver(root, libPath, initPath, sysInfo.AppArmor)
    case "native":
        return native.NewDriver(path.Join(root, "execdriver", "native"), initPath, options)
daemon/execdriver/native/apparmor.go (new file, 164 lines)
@@ -0,0 +1,164 @@
// +build linux

package native

import (
    "bufio"
    "fmt"
    "io"
    "os"
    "os/exec"
    "path"
    "strings"
    "text/template"

    "github.com/opencontainers/runc/libcontainer/apparmor"
)

const (
    apparmorProfilePath = "/etc/apparmor.d/docker"
)

type data struct {
    Name         string
    Imports      []string
    InnerImports []string
}

const baseTemplate = `
{{range $value := .Imports}}
{{$value}}
{{end}}

profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
{{range $value := .InnerImports}}
  {{$value}}
{{end}}

  network,
  capability,
  file,
  umount,

  signal (receive) peer=/usr/bin/docker,
  signal (receive) peer=docker-unconfined,

  deny @{PROC}/sys/fs/** wklx,
  deny @{PROC}/fs/** wklx,
  deny @{PROC}/sysrq-trigger rwklx,
  deny @{PROC}/mem rwklx,
  deny @{PROC}/kmem rwklx,
  deny @{PROC}/kore rwklx,
  deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx,
  deny @{PROC}/sys/kernel/*/** wklx,

  deny mount,
  deny ptrace (trace) peer=docker-default,

  deny /sys/[^f]*/** wklx,
  deny /sys/f[^s]*/** wklx,
  deny /sys/fs/[^c]*/** wklx,
  deny /sys/fs/c[^g]*/** wklx,
  deny /sys/fs/cg[^r]*/** wklx,
  deny /sys/firmware/efi/efivars/** rwklx,
  deny /sys/kernel/security/** rwklx,
}

profile docker-unconfined flags=(attach_disconnected,mediate_deleted,complain) {
  #include <abstractions/base>

  network,
  capability,
  file,
  umount,
  mount,
  pivot_root,
  change_profile -> *,

  ptrace,
  signal,
}
`

func generateProfile(out io.Writer) error {
    compiled, err := template.New("apparmor_profile").Parse(baseTemplate)
    if err != nil {
        return err
    }
    data := &data{
        Name: "docker-default",
    }
    if tunablesExists() {
        data.Imports = append(data.Imports, "#include <tunables/global>")
    } else {
        data.Imports = append(data.Imports, "@{PROC}=/proc/")
    }
    if abstractionsExists() {
        data.InnerImports = append(data.InnerImports, "#include <abstractions/base>")
    }
    if err := compiled.Execute(out, data); err != nil {
        return err
    }
    return nil
}

// check if the tunables/global exist
func tunablesExists() bool {
    _, err := os.Stat("/etc/apparmor.d/tunables/global")
    return err == nil
}

// check if abstractions/base exist
func abstractionsExists() bool {
    _, err := os.Stat("/etc/apparmor.d/abstractions/base")
    return err == nil
}

func installAppArmorProfile() error {
    if !apparmor.IsEnabled() {
        return nil
    }

    // Make sure /etc/apparmor.d exists
    if err := os.MkdirAll(path.Dir(apparmorProfilePath), 0755); err != nil {
        return err
    }

    f, err := os.OpenFile(apparmorProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
    if err != nil {
        return err
    }
    if err := generateProfile(f); err != nil {
        f.Close()
        return err
    }
    f.Close()

    cmd := exec.Command("/sbin/apparmor_parser", "-r", "-W", "docker")
    // to use the parser directly we have to make sure we are in the correct
    // dir with the profile
    cmd.Dir = "/etc/apparmor.d"

    output, err := cmd.CombinedOutput()
    if err != nil {
        return fmt.Errorf("Error loading docker apparmor profile: %s (%s)", err, output)
    }
    return nil
}

func hasAppArmorProfileLoaded(profile string) error {
    file, err := os.Open("/sys/kernel/security/apparmor/profiles")
    if err != nil {
        return err
    }
    r := bufio.NewReader(file)
    for {
        p, err := r.ReadString('\n')
        if err != nil {
            return err
        }
        if strings.HasPrefix(p, profile+" ") {
            return nil
        }
    }
}
@@ -85,7 +85,7 @@ func (d *driver) createContainer(c *execdriver.Command) (*configs.Config, error)
        }

        /* These paths must be remounted as r/o */
-       container.ReadonlyPaths = append(container.ReadonlyPaths, "/proc", "/dev")
+       container.ReadonlyPaths = append(container.ReadonlyPaths, "/dev")
    }

    if err := d.setupMounts(container, c); err != nil {

@@ -198,7 +198,7 @@ func (d *driver) setPrivileged(container *configs.Config) (err error) {
    container.Devices = hostDevices

    if apparmor.IsEnabled() {
-       container.AppArmorProfile = "unconfined"
+       container.AppArmorProfile = "docker-unconfined"
    }

    return nil
@@ -21,6 +21,7 @@ import (
    sysinfo "github.com/docker/docker/pkg/system"
    "github.com/docker/docker/pkg/term"
    "github.com/opencontainers/runc/libcontainer"
+   "github.com/opencontainers/runc/libcontainer/apparmor"
    "github.com/opencontainers/runc/libcontainer/cgroups/systemd"
    "github.com/opencontainers/runc/libcontainer/configs"
    "github.com/opencontainers/runc/libcontainer/system"

@@ -51,6 +52,20 @@ func NewDriver(root, initPath string, options []string) (*driver, error) {
        return nil, err
    }

+   if apparmor.IsEnabled() {
+       if err := installAppArmorProfile(); err != nil {
+           apparmor_profiles := []string{"docker-default", "docker-unconfined"}
+
+           // Allow daemon to run if loading failed, but are active
+           // (possibly through another run, manually, or via system startup)
+           for _, policy := range apparmor_profiles {
+               if err := hasAppArmorProfileLoaded(policy); err != nil {
+                   return nil, fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded.", policy)
+               }
+           }
+       }
+   }
+
    // choose cgroup manager
    // this makes sure there are no breaking changes to people
    // who upgrade from versions without native.cgroupdriver opt
@@ -323,7 +323,7 @@ func (a *Driver) Diff(id, parent string) (archive.Archive, error) {
}

func (a *Driver) applyDiff(id string, diff archive.ArchiveReader) error {
-	return chrootarchive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil)
+	return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), nil)
}

// DiffSize calculates the changes between the specified id
@@ -77,6 +77,7 @@ type Driver interface {
	// ApplyDiff extracts the changeset from the given diff into the
	// layer with the specified id and parent, returning the size of the
	// new layer in bytes.
+	// The archive.ArchiveReader must be an uncompressed stream.
	ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error)
	// DiffSize calculates the changes between the specified id
	// and its parent and returns the size in bytes of the changes
@@ -121,7 +121,7 @@ func (gdw *naiveDiffDriver) ApplyDiff(id, parent string, diff archive.ArchiveRea

	start := time.Now().UTC()
	logrus.Debugf("Start untar layer")
-	if size, err = chrootarchive.ApplyLayer(layerFs, diff); err != nil {
+	if size, err = chrootarchive.ApplyUncompressedLayer(layerFs, diff); err != nil {
		return
	}
	logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
@@ -411,7 +411,7 @@ func (d *Driver) ApplyDiff(id string, parent string, diff archive.ArchiveReader)
		return 0, err
	}

-	if size, err = chrootarchive.ApplyLayer(tmpRootDir, diff); err != nil {
+	if size, err = chrootarchive.ApplyUncompressedLayer(tmpRootDir, diff); err != nil {
		return 0, err
	}
@@ -2,6 +2,7 @@ package daemon

import (
	"fmt"
+	"time"

	"github.com/docker/docker/api/types"
)

@@ -91,13 +92,13 @@ func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSON
		Pid:        container.State.Pid,
		ExitCode:   container.State.ExitCode,
		Error:      container.State.Error,
-		StartedAt:  container.State.StartedAt,
-		FinishedAt: container.State.FinishedAt,
+		StartedAt:  container.State.StartedAt.Format(time.RFC3339Nano),
+		FinishedAt: container.State.FinishedAt.Format(time.RFC3339Nano),
	}

	contJSONBase := &types.ContainerJSONBase{
		Id:      container.ID,
-		Created: container.Created,
+		Created: container.Created.Format(time.RFC3339Nano),
		Path:    container.Path,
		Args:    container.Args,
		State:   containerState,
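After this change the timestamps that `docker inspect` reports parse as RFC3339Nano. For instance (container name and output are illustrative):

```
$ docker inspect --format '{{.Created}}' mycontainer
2015-07-24T20:51:23.880956370Z
```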
@@ -1,9 +1,6 @@
package daemon

-import (
-	"fmt"
-	"syscall"
-)
+import "syscall"

// ContainerKill send signal to the container
// If no signal is given (sig 0), then Kill with SIGKILL and wait
@@ -18,12 +15,12 @@ func (daemon *Daemon) ContainerKill(name string, sig uint64) error {
	// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
	if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
		if err := container.Kill(); err != nil {
-			return fmt.Errorf("Cannot kill container %s: %s", name, err)
+			return err
		}
	} else {
		// Otherwise, just send the requested signal
		if err := container.KillSig(int(sig)); err != nil {
-			return fmt.Errorf("Cannot kill container %s: %s", name, err)
+			return err
		}
	}
	return nil
@@ -93,9 +93,9 @@ func New(ctx logger.Context) (logger.Logger, error) {
	}
	logrus.Debugf("logging driver fluentd configured for container:%s, host:%s, port:%d, tag:%s.", ctx.ContainerID, host, port, tag)

-	// logger tries to reconnect 2**64 - 1 times
+	// logger tries to reconnect 2**32 - 1 times
	// failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds]
-	log, err := fluent.New(fluent.Config{FluentPort: port, FluentHost: host, RetryWait: 1000, MaxRetry: math.MaxUint32})
+	log, err := fluent.New(fluent.Config{FluentPort: port, FluentHost: host, RetryWait: 1000, MaxRetry: math.MaxInt32})
	if err != nil {
		return nil, err
	}
@@ -259,7 +259,8 @@ func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.R
	if !config.Follow {
		return
	}
-	if config.Tail == 0 {
+
+	if config.Tail >= 0 {
		latestFile.Seek(0, os.SEEK_END)
	}
@@ -64,7 +64,12 @@ func NewLogWatcher() *LogWatcher {

// Close notifies the underlying log reader to stop
func (w *LogWatcher) Close() {
-	close(w.closeNotifier)
+	// only close if not already closed
+	select {
+	case <-w.closeNotifier:
+	default:
+		close(w.closeNotifier)
+	}
}

// WatchClose returns a channel receiver that receives notification when the watcher has been closed
@@ -100,6 +100,7 @@ func migrateKey() (err error) {
			err = os.Remove(oldPath)
		} else {
			logrus.Warnf("Key migration failed, key file not removed at %s", oldPath)
+			os.Remove(newPath)
		}
	}()
@@ -226,7 +227,7 @@ func (cli *DaemonCli) CmdDaemon(args ...string) error {
		}
		tlsConfig, err := tlsconfig.Server(*commonFlags.TLSOptions)
		if err != nil {
-			logrus.Fatalf("foobar: %v", err)
+			logrus.Fatal(err)
		}
		serverConfig.TLSConfig = tlsConfig
	}
@@ -12,7 +12,7 @@ weight = 99
# Automatically start containers

As of Docker 1.2,
-[restart policies](/reference/commandline/cli/#restart-policies) are the
+[restart policies](/reference/run/#restart-policies-restart) are the
built-in Docker mechanism for restarting containers when they exit. If set,
restart policies will be used when the Docker daemon starts up, as typically
happens after a system boot. Restart policies will ensure that linked containers
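A minimal illustration of such a policy (the `--restart` flag is documented in the run reference linked above; the image choice is arbitrary):

```
$ docker run -d --restart=always redis
```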
@@ -31,6 +31,12 @@ Follow the instructions in the plugin's documentation.

The following plugins exist:

+* The [Blockbridge plugin](https://github.com/blockbridge/blockbridge-docker-volume)
+  is a volume plugin that provides access to an extensible set of
+  container-based persistent storage options. It supports single and multi-host Docker
+  environments with features that include tenant isolation, automated
+  provisioning, encryption, secure deletion, snapshots and QoS.
+
* The [Flocker plugin](https://clusterhq.com/docker-plugin/) is a volume plugin
  which provides multi-host portable volumes for Docker, enabling you to run
  databases and other stateful containers and move them around across a cluster
@@ -64,8 +64,7 @@ a container. To exit the container type `exit`.

If you want your containers to be able to access the external network you must
enable the `net.ipv4.ip_forward` rule.
This can be done using YaST by browsing to the
-`Network Devices -> Network Settings -> Routing` menu and ensuring that the
-`Enable IPv4 Forwarding` box is checked.
+`System -> Network Settings -> Routing` menu (for openSUSE Tumbleweed and later) or `Network Devices -> Network Settings -> Routing` menu (for SUSE Linux Enterprise 12 and previous openSUSE versions) and ensuring that the `Enable IPv4 Forwarding` box is checked.

This option cannot be changed when networking is handled by the Network Manager.
In such cases the `/etc/sysconfig/SuSEfirewall2` file needs to be edited by
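Equivalently, forwarding can be enabled from a shell, a sketch assuming root privileges (persist it in `/etc/sysctl.conf` if it should survive reboots):

```
$ sysctl -w net.ipv4.ip_forward=1
```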
@@ -12,6 +12,14 @@ parent = "mn_use_docker"

The following list of features is deprecated.

### LXC built-in exec driver
**Deprecated In Release: v1.8**

**Target For Removal In Release: v1.10**

The built-in LXC execution driver is deprecated in favor of an external implementation.
The lxc-conf flag and API fields will also be removed.

### Old Command Line Options
**Deprecated In Release: [v1.8.0](/release-notes/#docker-engine-1-8-0)**
@@ -108,7 +108,7 @@ of a 404.
You can now supply a `stream` bool to get only one set of stats and
disconnect

-`GET /containers(id)/logs`
+`GET /containers/(id)/logs`

**New!**

@@ -138,6 +138,7 @@ In addition, the end point now returns the new boolean fields
This endpoint now returns `Os`, `Arch` and `KernelVersion`.

`POST /containers/create`
+`POST /containers/(id)/start`

**New!**
@@ -1109,7 +1109,7 @@ Query Parameters:

    HTTP/1.1 200 OK
    Content-Type: application/x-tar
-   X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInBhdGgiOiIvcm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oifQ==
+   X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0=

    {{ TAR STREAM }}

@@ -1120,10 +1120,10 @@ JSON object (whitespace added for readability):

    {
        "name": "root",
-       "path": "/root",
        "size": 4096,
        "mode": 2147484096,
-       "mtime": "2014-02-27T20:51:23Z"
+       "mtime": "2014-02-27T20:51:23Z",
+       "linkTarget": ""
    }

A `HEAD` request can also be made to this endpoint if only this information is
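The header value is plain base64-encoded JSON, so the sample above can be checked directly; decoding it yields the documented object:

```
$ echo 'eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0=' | base64 -d
{"name":"root","size":4096,"mode":2147484096,"mtime":"2014-02-27T20:51:23Z","linkTarget":""}
```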
45 docs/security/apparmor.md (new file)
@@ -0,0 +1,45 @@
AppArmor security profiles for Docker
--------------------------------------

AppArmor (Application Armor) is a security module that allows a system
administrator to associate a security profile with each program. Docker
expects to find an AppArmor policy loaded and enforced.

Container profiles are loaded automatically by Docker. A profile
for the Docker Engine itself also exists and is installed
with the official *.deb* packages. Advanced users and package
managers may find the profile for */usr/bin/docker* underneath
[contrib/apparmor](https://github.com/docker/docker/tree/master/contrib/apparmor)
in the Docker Engine source repository.


Understand the policies
------------------------

The `docker-default` profile is the default for running
containers. It is moderately protective while
providing wide application compatibility.

The `docker-unconfined` profile is intended for
privileged applications and is the default when running
a container with the *--privileged* flag.

The system's standard `unconfined` profile inherits all
system-wide policies, applying path-based policies
intended for the host system inside of containers.
This was the default for privileged containers
prior to Docker 1.8.


Overriding the profile for a container
---------------------------------------

Users may override the AppArmor profile using the
`security-opt` option (per-container).

For example, the following explicitly specifies the default policy:

```
$ docker run --rm -it --security-opt apparmor:docker-default hello-world
```
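By the same mechanism a container can be run without AppArmor confinement (illustrative, mirroring the example above):

```
$ docker run --rm -it --security-opt apparmor:unconfined hello-world
```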
@@ -61,8 +61,6 @@ After downloading the appropriate binary, you can follow the instructions

## Current experimental features

-* [Support for Docker plugins](plugins.md)
-* [Volume plugins](plugins_volume.md)
* [Network plugins](plugins_network.md)
* [Native Multi-host networking](networking.md)
* [Compose, Swarm and networking integration](compose_swarm_networking.md)
@@ -102,13 +102,12 @@ func (p *v2Puller) pullV2Repository(tag string) (err error) {

// downloadInfo is used to pass information from download to extractor
type downloadInfo struct {
-	img      *image.Image
-	tmpFile  *os.File
-	digest   digest.Digest
-	layer    distribution.ReadSeekCloser
-	size     int64
-	err      chan error
-	verified bool
+	img     *image.Image
+	tmpFile *os.File
+	digest  digest.Digest
+	layer   distribution.ReadSeekCloser
+	size    int64
+	err     chan error
}

type errVerification struct{}

@@ -176,9 +175,11 @@ func (p *v2Puller) download(di *downloadInfo) {

	out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Verifying Checksum", nil))

-	di.verified = verifier.Verified()
-	if !di.verified {
-		logrus.Infof("Image verification failed for layer %s", di.digest)
+	if !verifier.Verified() {
+		err = fmt.Errorf("filesystem layer verification failed for digest %s", di.digest)
+		logrus.Error(err)
+		di.err <- err
+		return
	}

	out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Download complete", nil))

@@ -252,7 +253,6 @@ func (p *v2Puller) pullV2Tag(tag, taggedName string) (bool, error) {
			return false, err
		}
	}
-	verified = verified && d.verified
	if d.layer != nil {
		// if tmpFile is empty assume download and extracted elsewhere
		defer os.Remove(d.tmpFile.Name())
@@ -368,6 +368,28 @@ func (p *v2Puller) verifyTrustedKeys(namespace string, keys []libtrust.PublicKey
}

func (p *v2Puller) validateManifest(m *manifest.SignedManifest, tag string) (verified bool, err error) {
+	// If pull by digest, then verify the manifest digest. NOTE: It is
+	// important to do this first, before any other content validation. If the
+	// digest cannot be verified, don't even bother with those other things.
+	if manifestDigest, err := digest.ParseDigest(tag); err == nil {
+		verifier, err := digest.NewDigestVerifier(manifestDigest)
+		if err != nil {
+			return false, err
+		}
+		payload, err := m.Payload()
+		if err != nil {
+			return false, err
+		}
+		if _, err := verifier.Write(payload); err != nil {
+			return false, err
+		}
+		if !verifier.Verified() {
+			err := fmt.Errorf("image verification failed for digest %s", manifestDigest)
+			logrus.Error(err)
+			return false, err
+		}
+	}
+
	// TODO(tiborvass): what's the usecase for having manifest == nil and err == nil ? Shouldn't be the error be "DoesNotExist" ?
	if m == nil {
		return false, fmt.Errorf("image manifest does not exist for tag %q", tag)

@@ -389,21 +411,5 @@ func (p *v2Puller) validateManifest(m *manifest.SignedManifest, tag string) (ver
	if err != nil {
		return false, fmt.Errorf("error verifying manifest keys: %v", err)
	}
-	localDigest, err := digest.ParseDigest(tag)
-	// if pull by digest, then verify
-	if err == nil {
-		verifier, err := digest.NewDigestVerifier(localDigest)
-		if err != nil {
-			return false, err
-		}
-		payload, err := m.Payload()
-		if err != nil {
-			return false, err
-		}
-		if _, err := verifier.Write(payload); err != nil {
-			return false, err
-		}
-		verified = verified && verifier.Verified()
-	}
	return verified, nil
}
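The practical effect is that pulls by digest now fail fast on a manifest mismatch. The CLI shape of such a pull looks like this (registry host and digest are placeholders):

```
$ docker pull registry.example.com:5000/dockercli/busybox-by-dgst@sha256:<manifest-digest>
```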
@@ -4,6 +4,7 @@ import (
	"fmt"
	"io"
	"runtime"
+	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/api/types"

@@ -34,7 +35,7 @@ func (s *TagStore) Lookup(name string) (*types.ImageInspect, error) {
		Id:      image.ID,
		Parent:  image.Parent,
		Comment: image.Comment,
-		Created: image.Created,
+		Created: image.Created.Format(time.RFC3339Nano),
		Container:       image.Container,
		ContainerConfig: &image.ContainerConfig,
		DockerVersion:   image.DockerVersion,
@@ -119,41 +119,76 @@ do_install() {
	dist_version=''
	if command_exists lsb_release; then
		lsb_dist="$(lsb_release -si)"
		dist_version="$(lsb_release --codename | cut -f2)"
	fi
	if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then
		lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")"
		dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")"
	fi
	if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then
		lsb_dist='debian'
		dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')"
		case "$dist_version" in
			8)
				dist_version="jessie"
				;;

			7)
				dist_version="wheezy"
				;;
		esac
	fi
	if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then
		lsb_dist='fedora'
		dist_version="$(rpm -qa \*-release | cut -d"-" -f3 | head -n1)"
	fi
	if [ -z "$lsb_dist" ] && [ -r /etc/oracle-release ]; then
		lsb_dist='oracleserver'
	fi
	if [ -z "$lsb_dist" ]; then
		if [ -r /etc/centos-release ] || [ -r /etc/redhat-release ]; then
			lsb_dist='centos'
			dist_version="$(rpm -qa \*-release | cut -d"-" -f3 | head -n1)"
		fi
	fi
	if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then
		lsb_dist="$(. /etc/os-release && echo "$ID")"
		dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
	fi

	lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"

	case "$lsb_dist" in

		ubuntu)
			if command_exists lsb_release; then
				dist_version="$(lsb_release --codename | cut -f2)"
			fi
			if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then
				dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")"
			fi
			;;

		debian)
			dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')"
			case "$dist_version" in
				8)
					dist_version="jessie"
					;;
				7)
					dist_version="wheezy"
					;;
			esac
			;;

		oracleserver)
			# need to switch lsb_dist to match yum repo URL
			lsb_dist="oraclelinux"
			dist_version="$(rpm -q --whatprovides redhat-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//')"
			;;

		fedora|centos)
			dist_version="$(rpm -q --whatprovides redhat-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//')"
			;;

		*)
			if command_exists lsb_release; then
				dist_version="$(lsb_release --codename | cut -f2)"
			fi
			if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
				dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
			fi
			;;

	esac

	case "$lsb_dist" in
		amzn)
			(
@@ -237,8 +272,8 @@ do_install() {
			exit 0
			;;

-		fedora|centos)
-			cat >/etc/yum.repos.d/docker-${repo}.repo <<-EOF
+		fedora|centos|oraclelinux)
+			$sh_c "cat >/etc/yum.repos.d/docker-${repo}.repo" <<-EOF
			[docker-${repo}-repo]
			name=Docker ${repo} Repository
			baseurl=https://yum.dockerproject.org/repo/${repo}/${lsb_dist}/${dist_version}
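For a concrete sense of what the heredoc writes, substituting the variables above for, say, `repo=main` on CentOS 7 would produce roughly:

```
[docker-main-repo]
name=Docker main Repository
baseurl=https://yum.dockerproject.org/repo/main/centos/7
```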
@@ -33,7 +33,7 @@ override_dh_installudev:
	dh_installudev --priority=z80

override_dh_install:
-	dh_apparmor --profile-name=docker -pdocker-engine
	dh_install
+	dh_apparmor --profile-name=docker-engine -pdocker-engine

%:
@@ -21,17 +21,19 @@ APTDIR=$DOCKER_RELEASE_DIR/apt/repo
mkdir -p "$APTDIR/conf" "$APTDIR/db"

# create/update distributions file
-for suite in $(exec contrib/reprepro/suites.sh); do
-	cat <<-EOF
-	Origin: Docker
-	Suite: $suite
-	Codename: $suite
-	Architectures: amd64 i386
-	Components: main testing experimental
-	Description: Docker APT Repository
+if [[ ! -f "$APTDIR/conf/distributions" ]]; then
+	for suite in $(exec contrib/reprepro/suites.sh); do
+		cat <<-EOF
+		Origin: Docker
+		Suite: $suite
+		Codename: $suite
+		Architectures: amd64 i386
+		Components: main testing experimental
+		Description: Docker APT Repository

-	EOF
-	done > "$APTDIR/conf/distributions"
+		EOF
+	done > "$APTDIR/conf/distributions"
+fi

# set the component and priority for the version being released
component="main"
@@ -74,8 +74,7 @@ bundle_ubuntu() {

	# Include contributed apparmor policy
	mkdir -p "$DIR/etc/apparmor.d/"
-	cp contrib/apparmor/docker "$DIR/etc/apparmor.d/"
-	cp contrib/apparmor/docker-engine "$DIR/etc/apparmor.d/"
+	cp contrib/apparmor/* "$DIR/etc/apparmor.d/"

	# Copy the binary
	# This will fail if the binary bundle hasn't been built
@@ -95,7 +94,6 @@ if [ "$1" = 'configure' ] && [ -z "$2" ]; then
|
||||
fi
|
||||
|
||||
if ( aa-status --enabled ); then
|
||||
/sbin/apparmor_parser -r -W -T /etc/apparmor.d/docker
|
||||
/sbin/apparmor_parser -r -W -T /etc/apparmor.d/docker-engine
|
||||
fi
|
||||
|
||||
|
||||
@@ -70,6 +70,7 @@ BUCKET=$AWS_S3_BUCKET
# GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6"

setup_s3() {
+	echo "Setting up S3"
	# Try creating the bucket. Ignore errors (it might already exist).
	s3cmd mb "s3://$BUCKET" 2>/dev/null || true
	# Check access to the bucket.

@@ -102,6 +103,7 @@ s3_url() {
}

build_all() {
+	echo "Building release"
	if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
		echo >&2
		echo >&2 'The build or tests appear to have failed.'

@@ -162,6 +164,7 @@ upload_release_build() {
}

release_build() {
+	echo "Releasing binaries"
	GOOS=$1
	GOARCH=$2

@@ -246,6 +249,7 @@ release_build() {
# 1. A full APT repository is published at $BUCKET/ubuntu/
# 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/index
release_ubuntu() {
+	echo "Releasing ubuntu"
	[ -e "bundles/$VERSION/ubuntu" ] || {
		echo >&2 './hack/make.sh must be run before release_ubuntu'
		exit 1

@@ -338,16 +342,19 @@ EOF

# Upload the index script
release_index() {
+	echo "Releasing index"
	sed "s,url='https://get.docker.com/',url='$(s3_url)/'," hack/install.sh | write_to_s3 "s3://$BUCKET/index"
}

release_test() {
+	echo "Releasing tests"
	if [ -e "bundles/$VERSION/test" ]; then
		s3cmd --acl-public sync "bundles/$VERSION/test/" "s3://$BUCKET/test/"
	fi
}

setup_gpg() {
+	echo "Setting up GPG"
	# Make sure that we have our keys
	mkdir -p "$HOME/.gnupg/"
	s3cmd sync "s3://$BUCKET/ubuntu/.gnupg/" "$HOME/.gnupg/" || true
@@ -21,7 +21,7 @@ clone git golang.org/x/net 3cffabab72adf04f8e3b01c5baf775361837b5fe https://gith
clone hg code.google.com/p/gosqlite 74691fb6f837

#get libnetwork packages
-clone git github.com/docker/libnetwork f1c5671f1ee2133055144e566cd8b3a0ae4f0433
+clone git github.com/docker/libnetwork 78fc31ddc425fb379765c6b7ab5b96748bd8fc08
clone git github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
clone git github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
clone git github.com/hashicorp/memberlist 9a1e242e454d2443df330bdd51a436d5a9058fc4

@@ -35,11 +35,11 @@ clone git github.com/coreos/go-etcd v2.0.0
clone git github.com/hashicorp/consul v0.5.2

# get graph and distribution packages
-clone git github.com/docker/distribution cd8ff553b6b1911be23dfeabb73e33108bcbf147
+clone git github.com/docker/distribution 7dc8d4a26b689bd4892f2f2322dbce0b7119d686
clone git github.com/vbatts/tar-split v0.9.4

-clone git github.com/docker/notary 77bced079e83d80f40c1f0a544b1a8a3b97fb052
-clone git github.com/endophage/gotuf 374908abc8af7e953a2813c5c2b3944ab625ca68
+clone git github.com/docker/notary 8e8122eb5528f621afcd4e2854c47302f17392f7
+clone git github.com/endophage/gotuf 89ceb27829b9353dfee5ccccf7a3a9bb77008b05
clone git github.com/tent/canonical-json-go 96e4ba3a7613a1216cbd1badca4efe382adea337
clone git github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c
@@ -4,9 +4,11 @@ import (
	"archive/tar"
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httputil"
+	"net/url"
+	"os"
	"strconv"
	"strings"

@@ -1687,3 +1689,45 @@ func (s *DockerSuite) TestPostContainersStartWithLinksInHostConfigIdLinked(c *ch
	c.Assert(res.StatusCode, check.Equals, http.StatusNoContent)
	b.Close()
}

// #14915
func (s *DockerSuite) TestContainersApiCreateNoHostConfig118(c *check.C) {
	config := struct {
		Image string
	}{"busybox"}
	status, _, err := sockRequest("POST", "/v1.18/containers/create", config)
	c.Assert(err, check.IsNil)
	c.Assert(status, check.Equals, http.StatusCreated)
}

// Ensure an error occurs when you have a container read-only rootfs but you
// extract an archive to a symlink in a writable volume which points to a
// directory outside of the volume.
func (s *DockerSuite) TestPutContainerArchiveErrSymlinkInVolumeToReadOnlyRootfs(c *check.C) {
	testRequires(c, SameHostDaemon) // Requires local volume mount bind.

	testVol := getTestDir(c, "test-put-container-archive-err-symlink-in-volume-to-read-only-rootfs-")
	defer os.RemoveAll(testVol)

	makeTestContentInDir(c, testVol)

	cID := makeTestContainer(c, testContainerOptions{
		readOnly: true,
		volumes:  defaultVolumes(testVol), // Our bind mount is at /vol2
	})
	defer deleteContainer(cID)

	// Attempt to extract to a symlink in the volume which points to a
	// directory outside the volume. This should cause an error because the
	// rootfs is read-only.
	query := make(url.Values, 1)
	query.Set("path", "/vol2/symlinkToAbsDir")
	urlPath := fmt.Sprintf("/v1.20/containers/%s/archive?%s", cID, query.Encode())

	statusCode, body, err := sockRequest("PUT", urlPath, nil)
	c.Assert(err, check.IsNil)

	if !isCpCannotCopyReadOnly(fmt.Errorf(string(body))) {
		c.Fatalf("expected ErrContainerRootfsReadonly error, but got %d: %s", statusCode, string(body))
	}
}
@@ -5349,8 +5349,15 @@ func (s *DockerTrustSuite) TestTrustedBuild(c *check.C) {
		c.Fatalf("Unexpected output on trusted build:\n%s", out)
	}

-	// Build command does not create untrusted tag
-	//dockerCmd(c, "rmi", repoName)
+	// We should also have a tag reference for the image.
+	if out, exitCode := dockerCmd(c, "inspect", repoName); exitCode != 0 {
+		c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out)
+	}
+
+	// We should now be able to remove the tag reference.
+	if out, exitCode := dockerCmd(c, "rmi", repoName); exitCode != 0 {
+		c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out)
+	}
}

func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) {
@@ -5373,3 +5380,41 @@ func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) {
		c.Fatalf("Unexpected output on trusted build with untrusted tag:\n%s", out)
	}
}

func (s *DockerTrustSuite) TestBuildContextDirIsSymlink(c *check.C) {
	tempDir, err := ioutil.TempDir("", "test-build-dir-is-symlink-")
	if err != nil {
		c.Fatal(err)
	}
	defer os.RemoveAll(tempDir)

	// Make a real context directory in this temp directory with a simple
	// Dockerfile.
	realContextDirname := filepath.Join(tempDir, "context")
	if err := os.Mkdir(realContextDirname, os.FileMode(0755)); err != nil {
		c.Fatal(err)
	}

	if err = ioutil.WriteFile(
		filepath.Join(realContextDirname, "Dockerfile"),
		[]byte(`
			FROM busybox
			RUN echo hello world
		`),
		os.FileMode(0644),
	); err != nil {
		c.Fatal(err)
	}

	// Make a symlink to the real context directory.
	contextSymlinkName := filepath.Join(tempDir, "context_link")
	if err := os.Symlink(realContextDirname, contextSymlinkName); err != nil {
		c.Fatal(err)
	}

	// Executing the build with the symlink as the specified context should
	// *not* fail.
	if out, exitStatus := dockerCmd(c, "build", contextSymlinkName); exitStatus != 0 {
		c.Fatalf("build failed with exit status %d: %s", exitStatus, out)
	}
}
@@ -1,25 +1,29 @@
package main

import (
+	"encoding/json"
	"fmt"
	"regexp"
	"strings"

+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest"
	"github.com/docker/docker/utils"
	"github.com/go-check/check"
)

var (
-	repoName        = fmt.Sprintf("%v/dockercli/busybox-by-dgst", privateRegistryURL)
+	remoteRepoName  = "dockercli/busybox-by-dgst"
+	repoName        = fmt.Sprintf("%v/%s", privateRegistryURL, remoteRepoName)
	pushDigestRegex = regexp.MustCompile("[\\S]+: digest: ([\\S]+) size: [0-9]+")
	digestRegex     = regexp.MustCompile("Digest: ([\\S]+)")
)

-func setupImage(c *check.C) (string, error) {
+func setupImage(c *check.C) (digest.Digest, error) {
	return setupImageWithTag(c, "latest")
}

-func setupImageWithTag(c *check.C, tag string) (string, error) {
+func setupImageWithTag(c *check.C, tag string) (digest.Digest, error) {
	containerName := "busyboxbydigest"

	dockerCmd(c, "run", "-d", "-e", "digest=1", "--name", containerName, "busybox")

@@ -52,7 +56,7 @@ func setupImageWithTag(c *check.C, tag string) (string, error) {
	}
	pushDigest := matches[1]

-	return pushDigest, nil
+	return digest.Digest(pushDigest), nil
}

func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) {

@@ -72,7 +76,7 @@ func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) {
	pullDigest := matches[1]

	// make sure the pushed and pull digests match
-	if pushDigest != pullDigest {
+	if pushDigest.String() != pullDigest {
		c.Fatalf("push digest %q didn't match pull digest %q", pushDigest, pullDigest)
	}
}

@@ -95,7 +99,7 @@ func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) {
	pullDigest := matches[1]

	// make sure the pushed and pull digests match
-	if pushDigest != pullDigest {
+	if pushDigest.String() != pullDigest {
		c.Fatalf("push digest %q didn't match pull digest %q", pushDigest, pullDigest)
	}
}

@@ -291,7 +295,7 @@ func (s *DockerRegistrySuite) TestListImagesWithDigests(c *check.C) {
	out, _ := dockerCmd(c, "images", "--digests")

	// make sure repo shown, tag=<none>, digest = $digest1
-	re1 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest1 + `\s`)
+	re1 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest1.String() + `\s`)
	if !re1.MatchString(out) {
		c.Fatalf("expected %q: %s", re1.String(), out)
	}

@@ -319,7 +323,7 @@ func (s *DockerRegistrySuite) TestListImagesWithDigests(c *check.C) {
	}

	// make sure repo shown, tag=<none>, digest = $digest2
-	re2 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest2 + `\s`)
+	re2 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest2.String() + `\s`)
	if !re2.MatchString(out) {
		c.Fatalf("expected %q: %s", re2.String(), out)
	}

@@ -332,7 +336,7 @@ func (s *DockerSuite) TestListImagesWithDigests(c *check.C) {

	// make sure image 1 has repo, tag, <none> AND repo, <none>, digest
	reWithTag1 := regexp.MustCompile(`\s*` + repoName + `\s*tag1\s*<none>\s`)
-	reWithDigest1 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest1 + `\s`)
+	reWithDigest1 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest1.String() + `\s`)
	if !reWithTag1.MatchString(out) {
		c.Fatalf("expected %q: %s", reWithTag1.String(), out)
	}

@@ -357,7 +361,7 @@ func (s *DockerRegistrySuite) TestListImagesWithDigests(c *check.C) {

	// make sure image 2 has repo, tag, digest
	reWithTag2 := regexp.MustCompile(`\s*` + repoName + `\s*tag2\s*<none>\s`)
-	reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest2 + `\s`)
+	reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest2.String() + `\s`)
	if !reWithTag2.MatchString(out) {
		c.Fatalf("expected %q: %s", reWithTag2.String(), out)
	}
@@ -401,3 +405,95 @@ func (s *DockerRegistrySuite) TestDeleteImageByIDOnlyPulledByDigest(c *check.C)

	dockerCmd(c, "rmi", imageID)
}

// TestPullFailsWithAlteredManifest tests that a `docker pull` fails when
// we have modified a manifest blob and its digest cannot be verified.
func (s *DockerRegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) {
	manifestDigest, err := setupImage(c)
	if err != nil {
		c.Fatalf("error setting up image: %v", err)
	}

	// Load the target manifest blob.
	manifestBlob := s.reg.readBlobContents(c, manifestDigest)

	var imgManifest manifest.Manifest
	if err := json.Unmarshal(manifestBlob, &imgManifest); err != nil {
		c.Fatalf("unable to decode image manifest from blob: %s", err)
	}

	// Add a malicious layer digest to the list of layers in the manifest.
	imgManifest.FSLayers = append(imgManifest.FSLayers, manifest.FSLayer{
		BlobSum: digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"),
	})

	// Move the existing data file aside, so that we can replace it with a
	// malicious blob of data. NOTE: we defer the returned undo func.
	undo := s.reg.tempMoveBlobData(c, manifestDigest)
	defer undo()

	alteredManifestBlob, err := json.Marshal(imgManifest)
	if err != nil {
		c.Fatalf("unable to encode altered image manifest to JSON: %s", err)
	}

	s.reg.writeBlobContents(c, manifestDigest, alteredManifestBlob)

	// Now try pulling that image by digest. We should get an error about
	// digest verification for the manifest digest.

	// Pull from the registry using the <name>@<digest> reference.
	imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest)
	out, exitStatus, _ := dockerCmdWithError(c, "pull", imageReference)
	if exitStatus == 0 {
		c.Fatalf("expected a non-zero exit status but got %d: %s", exitStatus, out)
	}

	expectedErrorMsg := fmt.Sprintf("image verification failed for digest %s", manifestDigest)
	if !strings.Contains(out, expectedErrorMsg) {
		c.Fatalf("expected error message %q in output: %s", expectedErrorMsg, out)
	}
}

// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when
// we have modified a layer blob and its digest cannot be verified.
func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) {
	manifestDigest, err := setupImage(c)
	if err != nil {
		c.Fatalf("error setting up image: %v", err)
	}

	// Load the target manifest blob.
	manifestBlob := s.reg.readBlobContents(c, manifestDigest)

	var imgManifest manifest.Manifest
	if err := json.Unmarshal(manifestBlob, &imgManifest); err != nil {
		c.Fatalf("unable to decode image manifest from blob: %s", err)
	}

	// Next, get the digest of one of the layers from the manifest.
	targetLayerDigest := imgManifest.FSLayers[0].BlobSum

	// Move the existing data file aside, so that we can replace it with a
	// malicious blob of data. NOTE: we defer the returned undo func.
	undo := s.reg.tempMoveBlobData(c, targetLayerDigest)
	defer undo()

	// Now make a fake data blob in this directory.
	s.reg.writeBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for."))

	// Now try pulling that image by digest. We should get an error about
	// digest verification for the target layer digest.

	// Pull from the registry using the <name>@<digest> reference.
	imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest)
	out, exitStatus, _ := dockerCmdWithError(c, "pull", imageReference)
	if exitStatus == 0 {
		c.Fatalf("expected a non-zero exit status but got: %d", exitStatus)
	}

	expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest)
	if !strings.Contains(out, expectedErrorMsg) {
		c.Fatalf("expected error message %q in output: %s", expectedErrorMsg, out)
	}
}
@@ -130,6 +130,114 @@ func (s *DockerSuite) TestCpFromErrDstNotDir(c *check.C) {
	}
}

// Check that copying from a container to a local symlink copies to the symlink
// target and does not overwrite the local symlink itself.
func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) {
	cID := makeTestContainer(c, testContainerOptions{addContent: true})
	defer deleteContainer(cID)

	tmpDir := getTestDir(c, "test-cp-from-err-dst-not-dir")
	defer os.RemoveAll(tmpDir)

	makeTestContentInDir(c, tmpDir)

	// First, copy a file from the container to a symlink to a file. This
	// should overwrite the symlink target contents with the source contents.
	srcPath := containerCpPath(cID, "/file2")
	dstPath := cpPath(tmpDir, "symlinkToFile1")

	if err := runDockerCp(c, srcPath, dstPath); err != nil {
		c.Fatalf("unexpected error %T: %s", err, err)
	}

	// The symlink should not have been modified.
	if err := symlinkTargetEquals(c, dstPath, "file1"); err != nil {
		c.Fatal(err)
	}

	// The file should have the contents of "file2" now.
	if err := fileContentEquals(c, cpPath(tmpDir, "file1"), "file2\n"); err != nil {
		c.Fatal(err)
	}

	// Next, copy a file from the container to a symlink to a directory. This
	// should copy the file into the symlink target directory.
	dstPath = cpPath(tmpDir, "symlinkToDir1")

	if err := runDockerCp(c, srcPath, dstPath); err != nil {
		c.Fatalf("unexpected error %T: %s", err, err)
	}

	// The symlink should not have been modified.
	if err := symlinkTargetEquals(c, dstPath, "dir1"); err != nil {
		c.Fatal(err)
	}

	// The file should have the contents of "file2" now.
	if err := fileContentEquals(c, cpPath(tmpDir, "file2"), "file2\n"); err != nil {
		c.Fatal(err)
	}

	// Next, copy a file from the container to a symlink to a file that does
	// not exist (a broken symlink). This should create the target file with
	// the contents of the source file.
	dstPath = cpPath(tmpDir, "brokenSymlinkToFileX")

	if err := runDockerCp(c, srcPath, dstPath); err != nil {
		c.Fatalf("unexpected error %T: %s", err, err)
	}

	// The symlink should not have been modified.
	if err := symlinkTargetEquals(c, dstPath, "fileX"); err != nil {
		c.Fatal(err)
	}

	// The file should have the contents of "file2" now.
	if err := fileContentEquals(c, cpPath(tmpDir, "fileX"), "file2\n"); err != nil {
		c.Fatal(err)
	}

	// Next, copy a directory from the container to a symlink to a local
	// directory. This should copy the directory into the symlink target
	// directory and not modify the symlink.
	srcPath = containerCpPath(cID, "/dir2")
	dstPath = cpPath(tmpDir, "symlinkToDir1")

	if err := runDockerCp(c, srcPath, dstPath); err != nil {
		c.Fatalf("unexpected error %T: %s", err, err)
	}

	// The symlink should not have been modified.
	if err := symlinkTargetEquals(c, dstPath, "dir1"); err != nil {
		c.Fatal(err)
	}

	// The directory should now contain a copy of "dir2".
	if err := fileContentEquals(c, cpPath(tmpDir, "dir1/dir2/file2-1"), "file2-1\n"); err != nil {
		c.Fatal(err)
	}

	// Next, copy a directory from the container to a symlink to a local
	// directory that does not exist (a broken symlink). This should create
	// the target as a directory with the contents of the source directory. It
	// should not modify the symlink.
	dstPath = cpPath(tmpDir, "brokenSymlinkToDirX")

	if err := runDockerCp(c, srcPath, dstPath); err != nil {
		c.Fatalf("unexpected error %T: %s", err, err)
	}

	// The symlink should not have been modified.
	if err := symlinkTargetEquals(c, dstPath, "dirX"); err != nil {
		c.Fatal(err)
	}

	// The "dirX" directory should now be a copy of "dir2".
	if err := fileContentEquals(c, cpPath(tmpDir, "dirX/file2-1"), "file2-1\n"); err != nil {
		c.Fatal(err)
	}
}

// Possibilities are reduced to the remaining 10 cases:
//
// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action
@@ -250,29 +250,185 @@ func (s *DockerSuite) TestCpAbsoluteSymlink(c *check.C) {
		c.Fatal(err)
	}

-	tmpname := filepath.Join(tmpdir, cpTestName)
+	tmpname := filepath.Join(tmpdir, "container_path")
	defer os.RemoveAll(tmpdir)

	path := path.Join("/", "container_path")

	dockerCmd(c, "cp", cleanedContainerID+":"+path, tmpdir)

-	file, _ := os.Open(tmpname)
-	defer file.Close()
-
-	test, err := ioutil.ReadAll(file)
+	// We should have copied a symlink *NOT* the file itself!
+	linkTarget, err := os.Readlink(tmpname)
	if err != nil {
		c.Fatal(err)
	}

-	if string(test) == cpHostContents {
-		c.Errorf("output matched host file -- absolute symlink can escape container rootfs")
-	}
-
-	if string(test) != cpContainerContents {
-		c.Errorf("output doesn't match the input for absolute symlink")
+	if linkTarget != filepath.FromSlash(cpFullPath) {
+		c.Errorf("symlink target was %q, but expected: %q", linkTarget, cpFullPath)
	}
}

+// Check that symlinks to a directory behave as expected when copying one from
+// a container.
+func (s *DockerSuite) TestCpFromSymlinkToDirectory(c *check.C) {
+	out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPathParent+" /dir_link")
+	if exitCode != 0 {
+		c.Fatal("failed to create a container", out)
+	}
+
+	cleanedContainerID := strings.TrimSpace(out)
+
+	out, _ = dockerCmd(c, "wait", cleanedContainerID)
+	if strings.TrimSpace(out) != "0" {
+		c.Fatal("failed to set up container", out)
+	}
+
+	testDir, err := ioutil.TempDir("", "test-cp-from-symlink-to-dir-")
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer os.RemoveAll(testDir)
+
+	// This copy command should copy the symlink, not the target, into the
+	// temporary directory.
+	dockerCmd(c, "cp", cleanedContainerID+":"+"/dir_link", testDir)
+
+	expectedPath := filepath.Join(testDir, "dir_link")
+	linkTarget, err := os.Readlink(expectedPath)
+	if err != nil {
+		c.Fatalf("unable to read symlink at %q: %v", expectedPath, err)
+	}
+
+	if linkTarget != filepath.FromSlash(cpTestPathParent) {
+		c.Errorf("symlink target was %q, but expected: %q", linkTarget, cpTestPathParent)
+	}
+
+	os.Remove(expectedPath)
+
+	// This copy command should resolve the symlink (note the trailing
+	// separator), copying the target into the temporary directory.
+	dockerCmd(c, "cp", cleanedContainerID+":"+"/dir_link/", testDir)
+
+	// It *should not* have copied the directory using the target's name, but
+	// used the given name instead.
+	unexpectedPath := filepath.Join(testDir, cpTestPathParent)
+	if stat, err := os.Lstat(unexpectedPath); err == nil {
+		c.Fatalf("target name was copied: %q - %q", stat.Mode(), stat.Name())
+	}
+
+	// It *should* have copied the directory using the asked name "dir_link".
+	stat, err := os.Lstat(expectedPath)
+	if err != nil {
+		c.Fatalf("unable to stat resource at %q: %v", expectedPath, err)
+	}
+
+	if !stat.IsDir() {
+		c.Errorf("should have copied a directory but got %q instead", stat.Mode())
+	}
+}
+
+// Check that symlinks to a directory behave as expected when copying one to a
+// container.
+func (s *DockerSuite) TestCpToSymlinkToDirectory(c *check.C) {
+	testRequires(c, SameHostDaemon) // Requires local volume mount bind.
+
+	testVol, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-")
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer os.RemoveAll(testVol)
+
+	// Create a test container with a local volume. We will test by copying
+	// to the volume path in the container which we can then verify locally.
+	out, exitCode := dockerCmd(c, "create", "-v", testVol+":/testVol", "busybox")
+	if exitCode != 0 {
+		c.Fatal("failed to create a container", out)
+	}
+
+	cleanedContainerID := strings.TrimSpace(out)
+
+	// Create a temp directory to hold a test file nested in a directory.
+	testDir, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-")
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer os.RemoveAll(testDir)
+
+	// This file will be at "/testDir/some/path/test" and will be copied into
+	// the test volume later.
+	hostTestFilename := filepath.Join(testDir, cpFullPath)
+	if err := os.MkdirAll(filepath.Dir(hostTestFilename), os.FileMode(0700)); err != nil {
+		c.Fatal(err)
+	}
+	if err := ioutil.WriteFile(hostTestFilename, []byte(cpHostContents), os.FileMode(0600)); err != nil {
+		c.Fatal(err)
+	}
+
+	// Now create another temp directory to hold a symlink to the
+	// "/testDir/some" directory.
+	linkDir, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-")
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer os.RemoveAll(linkDir)
+
+	// Then symlink "/linkDir/dir_link" to "/testdir/some".
+	linkTarget := filepath.Join(testDir, cpTestPathParent)
+	localLink := filepath.Join(linkDir, "dir_link")
+	if err := os.Symlink(linkTarget, localLink); err != nil {
+		c.Fatal(err)
+	}
+
+	// Now copy that symlink into the test volume in the container.
+	dockerCmd(c, "cp", localLink, cleanedContainerID+":/testVol")
+
+	// This copy command should have copied the symlink *not* the target.
+	expectedPath := filepath.Join(testVol, "dir_link")
+	actualLinkTarget, err := os.Readlink(expectedPath)
+	if err != nil {
+		c.Fatalf("unable to read symlink at %q: %v", expectedPath, err)
+	}
+
+	if actualLinkTarget != linkTarget {
+		c.Errorf("symlink target was %q, but expected: %q", actualLinkTarget, linkTarget)
+	}
+
+	// Good, now remove that copied link for the next test.
+	os.Remove(expectedPath)
+
+	// This copy command should resolve the symlink (note the trailing
+	// separator), copying the target into the test volume directory in the
+	// container.
+	dockerCmd(c, "cp", localLink+"/", cleanedContainerID+":/testVol")
+
+	// It *should not* have copied the directory using the target's name, but
+	// used the given name instead.
+	unexpectedPath := filepath.Join(testVol, cpTestPathParent)
+	if stat, err := os.Lstat(unexpectedPath); err == nil {
+		c.Fatalf("target name was copied: %q - %q", stat.Mode(), stat.Name())
+	}
+
+	// It *should* have copied the directory using the asked name "dir_link".
+	stat, err := os.Lstat(expectedPath)
+	if err != nil {
+		c.Fatalf("unable to stat resource at %q: %v", expectedPath, err)
+	}
+
+	if !stat.IsDir() {
+		c.Errorf("should have copied a directory but got %q instead", stat.Mode())
+	}
+
+	// And this directory should contain the file copied from the host at the
+	// expected location: "/testVol/dir_link/path/test"
+	expectedFilepath := filepath.Join(testVol, "dir_link/path/test")
+	fileContents, err := ioutil.ReadFile(expectedFilepath)
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	if string(fileContents) != cpHostContents {
+		c.Fatalf("file contains %q but expected %q", string(fileContents), cpHostContents)
+	}
+}
+
// Test for #5619
@@ -146,6 +146,118 @@ func (s *DockerSuite) TestCpToErrDstNotDir(c *check.C) {
	}
}

// Check that copying from a local path to a symlink in a container copies to
// the symlink target and does not overwrite the container symlink itself.
func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) {
	testRequires(c, SameHostDaemon) // Requires local volume mount bind.

	testVol := getTestDir(c, "test-cp-to-symlink-destination-")
	defer os.RemoveAll(testVol)

	makeTestContentInDir(c, testVol)

	cID := makeTestContainer(c, testContainerOptions{
		volumes: defaultVolumes(testVol), // Our bind mount is at /vol2
	})
	defer deleteContainer(cID)

	// First, copy a local file to a symlink to a file in the container. This
	// should overwrite the symlink target contents with the source contents.
	srcPath := cpPath(testVol, "file2")
	dstPath := containerCpPath(cID, "/vol2/symlinkToFile1")

	if err := runDockerCp(c, srcPath, dstPath); err != nil {
		c.Fatalf("unexpected error %T: %s", err, err)
	}

	// The symlink should not have been modified.
	if err := symlinkTargetEquals(c, cpPath(testVol, "symlinkToFile1"), "file1"); err != nil {
		c.Fatal(err)
	}

	// The file should have the contents of "file2" now.
	if err := fileContentEquals(c, cpPath(testVol, "file1"), "file2\n"); err != nil {
		c.Fatal(err)
	}

	// Next, copy a local file to a symlink to a directory in the container.
	// This should copy the file into the symlink target directory.
	dstPath = containerCpPath(cID, "/vol2/symlinkToDir1")

	if err := runDockerCp(c, srcPath, dstPath); err != nil {
		c.Fatalf("unexpected error %T: %s", err, err)
	}

	// The symlink should not have been modified.
	if err := symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"); err != nil {
		c.Fatal(err)
	}

	// The file should have the contents of "file2" now.
	if err := fileContentEquals(c, cpPath(testVol, "file2"), "file2\n"); err != nil {
		c.Fatal(err)
	}

	// Next, copy a file to a symlink to a file that does not exist (a broken
	// symlink) in the container. This should create the target file with the
	// contents of the source file.
	dstPath = containerCpPath(cID, "/vol2/brokenSymlinkToFileX")

	if err := runDockerCp(c, srcPath, dstPath); err != nil {
		c.Fatalf("unexpected error %T: %s", err, err)
	}

	// The symlink should not have been modified.
	if err := symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToFileX"), "fileX"); err != nil {
		c.Fatal(err)
	}

	// The file should have the contents of "file2" now.
	if err := fileContentEquals(c, cpPath(testVol, "fileX"), "file2\n"); err != nil {
		c.Fatal(err)
	}

	// Next, copy a local directory to a symlink to a directory in the
	// container. This should copy the directory into the symlink target
	// directory and not modify the symlink.
	srcPath = cpPath(testVol, "/dir2")
	dstPath = containerCpPath(cID, "/vol2/symlinkToDir1")

	if err := runDockerCp(c, srcPath, dstPath); err != nil {
		c.Fatalf("unexpected error %T: %s", err, err)
	}

	// The symlink should not have been modified.
	if err := symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"); err != nil {
		c.Fatal(err)
	}

	// The directory should now contain a copy of "dir2".
	if err := fileContentEquals(c, cpPath(testVol, "dir1/dir2/file2-1"), "file2-1\n"); err != nil {
		c.Fatal(err)
	}

	// Next, copy a local directory to a symlink to a local directory that does
	// not exist (a broken symlink) in the container. This should create the
	// target as a directory with the contents of the source directory. It
	// should not modify the symlink.
	dstPath = containerCpPath(cID, "/vol2/brokenSymlinkToDirX")

	if err := runDockerCp(c, srcPath, dstPath); err != nil {
		c.Fatalf("unexpected error %T: %s", err, err)
	}

	// The symlink should not have been modified.
	if err := symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToDirX"), "dirX"); err != nil {
		c.Fatal(err)
	}

	// The "dirX" directory should now be a copy of "dir2".
	if err := fileContentEquals(c, cpPath(testVol, "dirX/file2-1"), "file2-1\n"); err != nil {
		c.Fatal(err)
	}
}

// Possibilities are reduced to the remaining 10 cases:
//
// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action
@@ -74,8 +74,11 @@ var defaultFileData = []fileData{
	{ftRegular, "dir4/file3-1", "file4-1"},
	{ftRegular, "dir4/file3-2", "file4-2"},
	{ftDir, "dir5", ""},
-	{ftSymlink, "symlink1", "target1"},
-	{ftSymlink, "symlink2", "target2"},
+	{ftSymlink, "symlinkToFile1", "file1"},
+	{ftSymlink, "symlinkToDir1", "dir1"},
+	{ftSymlink, "brokenSymlinkToFileX", "fileX"},
+	{ftSymlink, "brokenSymlinkToDirX", "dirX"},
+	{ftSymlink, "symlinkToAbsDir", "/root"},
}

func defaultMkContentCommand() string {
func defaultMkContentCommand() string {
|
||||
@@ -268,6 +271,21 @@ func fileContentEquals(c *check.C, filename, contents string) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func symlinkTargetEquals(c *check.C, symlink, expectedTarget string) (err error) {
|
||||
c.Logf("checking that the symlink %q points to %q\n", symlink, expectedTarget)
|
||||
|
||||
actualTarget, err := os.Readlink(symlink)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if actualTarget != expectedTarget {
|
||||
return fmt.Errorf("symlink target points to %q not %q", actualTarget, expectedTarget)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func containerStartOutputEquals(c *check.C, cID, contents string) (err error) {
|
||||
c.Logf("checking that container %q start output contains %q\n", cID, contents)
|
||||
|
||||
|
||||
@@ -780,6 +780,18 @@ func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Explicit(c *check.C) {
	deleteInterface(c, defaultNetworkBridge)
}

func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4ExplicitOutsideContainerSubnet(c *check.C) {
	defaultNetworkBridge := "docker0"
	deleteInterface(c, defaultNetworkBridge)

	// Program a custom default gateway outside of the container subnet, daemon should accept it and start
	err := s.d.StartWithBusybox("--bip", "172.16.0.10/16", "--fixed-cidr", "172.16.1.0/24", "--default-gateway", "172.16.0.254")
	c.Assert(err, check.IsNil)

	deleteInterface(c, defaultNetworkBridge)
	s.d.Restart()
}

func (s *DockerDaemonSuite) TestDaemonIP(c *check.C) {
	d := s.d
@@ -536,3 +536,10 @@ func (s *DockerSuite) TestExecWithImageUser(c *check.C) {
		c.Fatalf("exec with user by id expected dockerio user got %s", out)
	}
}

func (s *DockerSuite) TestExecOnReadonlyContainer(c *check.C) {
	dockerCmd(c, "run", "-d", "--read-only", "--name", "parent", "busybox", "top")
	if _, status := dockerCmd(c, "exec", "parent", "true"); status != 0 {
		c.Fatalf("exec into a read-only container failed with exit status %d", status)
	}
}

@@ -5,6 +5,7 @@ import (
	"os/exec"
	"strconv"
	"strings"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/go-check/check"
@@ -260,3 +261,28 @@ func (s *DockerSuite) TestInspectBindMountPoint(c *check.C) {
		c.Fatalf("Expected rw to be false")
	}
}

// #14947
func (s *DockerSuite) TestInspectTimesAsRFC3339Nano(c *check.C) {
	out, _ := dockerCmd(c, "run", "-d", "busybox", "true")
	id := strings.TrimSpace(out)
	startedAt, err := inspectField(id, "State.StartedAt")
	c.Assert(err, check.IsNil)
	finishedAt, err := inspectField(id, "State.FinishedAt")
	c.Assert(err, check.IsNil)
	created, err := inspectField(id, "Created")
	c.Assert(err, check.IsNil)

	_, err = time.Parse(time.RFC3339Nano, startedAt)
	c.Assert(err, check.IsNil)
	_, err = time.Parse(time.RFC3339Nano, finishedAt)
	c.Assert(err, check.IsNil)
	_, err = time.Parse(time.RFC3339Nano, created)
	c.Assert(err, check.IsNil)

	created, err = inspectField("busybox", "Created")
	c.Assert(err, check.IsNil)

	_, err = time.Parse(time.RFC3339Nano, created)
	c.Assert(err, check.IsNil)
}
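
The test pins the `docker inspect` time fields to Go's `time.RFC3339Nano` layout. A small sketch of what that parse accepts — the sample values are illustrative, including the zero time a still-running container reports for `FinishedAt`:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Timestamps in the shape `docker inspect` is expected to emit:
	// RFC3339 with optional nanosecond precision.
	samples := []string{
		"2015-07-24T10:20:30.123456789Z",
		"0001-01-01T00:00:00Z", // zero value, e.g. FinishedAt of a running container
	}
	for _, s := range samples {
		t, err := time.Parse(time.RFC3339Nano, s)
		if err != nil {
			panic(err)
		}
		fmt.Println(t.UTC())
	}
}
```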

@@ -1,6 +1,8 @@
package main

import (
	"fmt"
	"net/http"
	"strings"

	"github.com/go-check/check"
@@ -87,3 +89,12 @@ func (s *DockerSuite) TestKillWithInvalidSignal(c *check.C) {
		c.Fatal("Container should be in running state after an invalid signal")
	}
}

func (s *DockerSuite) TestKillofStoppedContainerAPIPre120(c *check.C) {
	dockerCmd(c, "run", "--name", "docker-kill-test-api", "-d", "busybox", "top")
	dockerCmd(c, "stop", "docker-kill-test-api")

	status, _, err := sockRequest("POST", fmt.Sprintf("/v1.19/containers/%s/kill", "docker-kill-test-api"), nil)
	c.Assert(err, check.IsNil)
	c.Assert(status, check.Equals, http.StatusNoContent)
}

@@ -275,7 +275,7 @@ func (s *DockerTrustSuite) TestTrustedPushWithIncorrectPassphraseForNonRoot(c *c

	// Push with wrong passphrases
	pushCmd = exec.Command(dockerBinary, "push", repoName)
	s.trustedCmdWithPassphrases(pushCmd, "12345678", "87654321", "87654321")
	s.trustedCmdWithPassphrases(pushCmd, "12345678", "87654321")
	out, _, err = runCommandWithOutput(pushCmd)
	if err == nil {
		c.Fatalf("Error missing from trusted push with short targets passphrase: \n%s", out)

@@ -2242,7 +2242,7 @@ func (s *DockerSuite) TestRunContainerWithWritableRootfs(c *check.C) {
func (s *DockerSuite) TestRunContainerWithReadonlyRootfs(c *check.C) {
	testRequires(c, NativeExecDriver)

	for _, f := range []string{"/file", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/proc/uptime", "/sys/kernel", "/dev/.dont.touch.me"} {
	for _, f := range []string{"/file", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/sys/kernel", "/dev/.dont.touch.me"} {
		testReadOnlyFile(f, c)
	}
}
@@ -2397,7 +2397,10 @@ func (s *DockerSuite) TestRunWriteToProcAsound(c *check.C) {
func (s *DockerSuite) TestRunReadProcTimer(c *check.C) {
	testRequires(c, NativeExecDriver)
	out, code, err := dockerCmdWithError(c, "run", "busybox", "cat", "/proc/timer_stats")
	if err != nil || code != 0 {
	if code != 0 {
		return
	}
	if err != nil {
		c.Fatal(err)
	}
	if strings.Trim(out, "\n ") != "" {
@@ -2414,7 +2417,10 @@ func (s *DockerSuite) TestRunReadProcLatency(c *check.C) {
		return
	}
	out, code, err := dockerCmdWithError(c, "run", "busybox", "cat", "/proc/latency_stats")
	if err != nil || code != 0 {
	if code != 0 {
		return
	}
	if err != nil {
		c.Fatal(err)
	}
	if strings.Trim(out, "\n ") != "" {
@@ -2422,6 +2428,28 @@ func (s *DockerSuite) TestRunReadProcLatency(c *check.C) {
	}
}

func (s *DockerSuite) TestRunReadFilteredProc(c *check.C) {
	testRequires(c, Apparmor)

	testReadPaths := []string{
		"/proc/latency_stats",
		"/proc/timer_stats",
		"/proc/kcore",
	}
	for i, filePath := range testReadPaths {
		name := fmt.Sprintf("procsieve-%d", i)
		shellCmd := fmt.Sprintf("exec 3<%s", filePath)

		out, exitCode, err := dockerCmdWithError(c, "run", "--privileged", "--security-opt", "apparmor:docker-default", "--name", name, "busybox", "sh", "-c", shellCmd)
		if exitCode != 0 {
			return
		}
		if err != nil {
			c.Fatalf("Open FD for read should have failed with permission denied, got: %s, %v", out, err)
		}
	}
}

func (s *DockerSuite) TestMountIntoProc(c *check.C) {
	testRequires(c, NativeExecDriver)
	_, code, err := dockerCmdWithError(c, "run", "-v", "/proc//sys", "busybox", "true")
@@ -2515,13 +2543,17 @@ func (s *DockerSuite) TestRunWriteFilteredProc(c *check.C) {
		"/proc/sys/kernel/modprobe",
		"/proc/sys/kernel/core_pattern",
		"/proc/sysrq-trigger",
		"/proc/kcore",
	}
	for i, filePath := range testWritePaths {
		name := fmt.Sprintf("writeprocsieve-%d", i)

		shellCmd := fmt.Sprintf("exec 3>%s", filePath)
		runCmd := exec.Command(dockerBinary, "run", "--privileged", "--security-opt", "apparmor:docker-default", "--name", name, "busybox", "sh", "-c", shellCmd)
		if out, exitCode, err := runCommandWithOutput(runCmd); err == nil || exitCode == 0 {
		out, code, err := dockerCmdWithError(c, "run", "--privileged", "--security-opt", "apparmor:docker-default", "--name", name, "busybox", "sh", "-c", shellCmd)
		if code != 0 {
			return
		}
		if err != nil {
			c.Fatalf("Open FD for write should have failed with permission denied, got: %s, %v", out, err)
		}
	}
@@ -2704,3 +2736,42 @@ func (s *DockerTrustSuite) TestTrustedRunFromBadTrustServer(c *check.C) {
		c.Fatalf("Missing expected output on trusted push:\n%s", out)
	}
}

func (s *DockerSuite) TestPtraceContainerProcsFromHost(c *check.C) {
	testRequires(c, SameHostDaemon)

	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
	id := strings.TrimSpace(out)
	if err := waitRun(id); err != nil {
		c.Fatal(err)
	}
	pid1, err := inspectField(id, "State.Pid")
	c.Assert(err, check.IsNil)

	_, err = os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1))
	if err != nil {
		c.Fatal(err)
	}
}

func (s *DockerSuite) TestAppArmorDeniesPtrace(c *check.C) {
	testRequires(c, SameHostDaemon)
	testRequires(c, Apparmor)

	// Run through 'sh' so we are NOT pid 1. Pid 1 may be able to trace
	// itself, but pid>1 should not be able to trace pid1.
	_, exitCode, _ := dockerCmdWithError(c, "run", "busybox", "sh", "-c", "readlink /proc/1/ns/net")
	if exitCode == 0 {
		c.Fatal("ptrace was not successfully restricted by AppArmor")
	}
}

func (s *DockerSuite) TestAppArmorTraceSelf(c *check.C) {
	testRequires(c, SameHostDaemon)
	testRequires(c, Apparmor)

	_, exitCode, _ := dockerCmdWithError(c, "run", "busybox", "readlink", "/proc/1/ns/net")
	if exitCode != 0 {
		c.Fatal("ptrace of self failed.")
	}
}

@@ -8,6 +8,7 @@ import (
	"os/exec"
	"path/filepath"

	"github.com/docker/distribution/digest"
	"github.com/go-check/check"
)

@@ -70,3 +71,50 @@ func (t *testRegistryV2) Close() {
	t.cmd.Process.Kill()
	os.RemoveAll(t.dir)
}

func (t *testRegistryV2) getBlobFilename(blobDigest digest.Digest) string {
	// Split the digest into its algorithm and hex components.
	dgstAlg, dgstHex := blobDigest.Algorithm(), blobDigest.Hex()

	// The path to the target blob data looks something like:
	//   baseDir + "docker/registry/v2/blobs/sha256/a3/a3ed...46d4/data"
	return fmt.Sprintf("%s/docker/registry/v2/blobs/%s/%s/%s/data", t.dir, dgstAlg, dgstHex[:2], dgstHex)
}
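
The comment above describes the registry's content-addressed layout: the digest's algorithm becomes a directory, and the first two hex characters form a fan-out level. A hedged sketch that reproduces the same path computation from a plain digest string — `blobPath` and the base directory here are assumptions for illustration:

```go
package main

import (
	"fmt"
	"strings"
)

// blobPath reproduces the layout used above: split the digest into its
// algorithm and hex parts, then use the first two hex characters as a
// fan-out directory. Assumes a well-formed "alg:hex" digest string.
func blobPath(baseDir, dgst string) string {
	parts := strings.SplitN(dgst, ":", 2) // e.g. "sha256:a3ed..."
	alg, hex := parts[0], parts[1]
	return fmt.Sprintf("%s/docker/registry/v2/blobs/%s/%s/%s/data", baseDir, alg, hex[:2], hex)
}

func main() {
	fmt.Println(blobPath("/tmp/registry",
		"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"))
	// /tmp/registry/docker/registry/v2/blobs/sha256/a3/a3ed95...46d4/data
}
```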

func (t *testRegistryV2) readBlobContents(c *check.C, blobDigest digest.Digest) []byte {
	// Load the target manifest blob.
	manifestBlob, err := ioutil.ReadFile(t.getBlobFilename(blobDigest))
	if err != nil {
		c.Fatalf("unable to read blob: %s", err)
	}

	return manifestBlob
}

func (t *testRegistryV2) writeBlobContents(c *check.C, blobDigest digest.Digest, data []byte) {
	if err := ioutil.WriteFile(t.getBlobFilename(blobDigest), data, os.FileMode(0644)); err != nil {
		c.Fatalf("unable to write malicious data blob: %s", err)
	}
}

func (t *testRegistryV2) tempMoveBlobData(c *check.C, blobDigest digest.Digest) (undo func()) {
	tempFile, err := ioutil.TempFile("", "registry-temp-blob-")
	if err != nil {
		c.Fatalf("unable to get temporary blob file: %s", err)
	}
	tempFile.Close()

	blobFilename := t.getBlobFilename(blobDigest)

	// Move the existing data file aside, so that we can replace it with
	// another blob of data.
	if err := os.Rename(blobFilename, tempFile.Name()); err != nil {
		os.Remove(tempFile.Name())
		c.Fatalf("unable to move data blob: %s", err)
	}

	return func() {
		os.Rename(tempFile.Name(), blobFilename)
		os.Remove(tempFile.Name())
	}
}

@@ -32,7 +32,8 @@ func newTestNotary(c *check.C) (*testNotary, error) {
	"trust_service": {
		"type": "local",
		"hostname": "",
		"port": ""
		"port": "",
		"key_algorithm": "ed25519"
	},
	"logging": {
		"level": 5
@@ -116,25 +117,24 @@ func (t *testNotary) Close() {

func (s *DockerTrustSuite) trustedCmd(cmd *exec.Cmd) {
	pwd := "12345678"
	trustCmdEnv(cmd, s.not.address(), pwd, pwd, pwd)
	trustCmdEnv(cmd, s.not.address(), pwd, pwd)
}

func (s *DockerTrustSuite) trustedCmdWithServer(cmd *exec.Cmd, server string) {
	pwd := "12345678"
	trustCmdEnv(cmd, server, pwd, pwd, pwd)
	trustCmdEnv(cmd, server, pwd, pwd)
}

func (s *DockerTrustSuite) trustedCmdWithPassphrases(cmd *exec.Cmd, rootPwd, snapshotPwd, targetPwd string) {
	trustCmdEnv(cmd, s.not.address(), rootPwd, snapshotPwd, targetPwd)
func (s *DockerTrustSuite) trustedCmdWithPassphrases(cmd *exec.Cmd, offlinePwd, taggingPwd string) {
	trustCmdEnv(cmd, s.not.address(), offlinePwd, taggingPwd)
}

func trustCmdEnv(cmd *exec.Cmd, server, rootPwd, snapshotPwd, targetPwd string) {
func trustCmdEnv(cmd *exec.Cmd, server, offlinePwd, taggingPwd string) {
	env := []string{
		"DOCKER_CONTENT_TRUST=1",
		fmt.Sprintf("DOCKER_CONTENT_TRUST_SERVER=%s", server),
		fmt.Sprintf("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE=%s", rootPwd),
		fmt.Sprintf("DOCKER_CONTENT_TRUST_SNAPSHOT_PASSPHRASE=%s", snapshotPwd),
		fmt.Sprintf("DOCKER_CONTENT_TRUST_TARGET_PASSPHRASE=%s", targetPwd),
		fmt.Sprintf("DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE=%s", offlinePwd),
		fmt.Sprintf("DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE=%s", taggingPwd),
	}
	cmd.Env = append(os.Environ(), env...)
}
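
The hunk collapses the three root/snapshot/target passphrase variables into offline/tagging ones. A sketch of wiring them onto a `docker push`, assuming an illustrative notary server address, repository name, and passphrases:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// Assumed values for illustration; the test suite derives these from
	// its local notary server and fixed passphrases.
	server := "https://notary-server:4443"
	offlinePwd, taggingPwd := "12345678", "12345678"

	cmd := exec.Command("docker", "push", "example.com/user/repo:latest")
	cmd.Env = append(os.Environ(),
		"DOCKER_CONTENT_TRUST=1",
		fmt.Sprintf("DOCKER_CONTENT_TRUST_SERVER=%s", server),
		fmt.Sprintf("DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE=%s", offlinePwd),
		fmt.Sprintf("DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE=%s", taggingPwd),
	)
	out, err := cmd.CombinedOutput()
	fmt.Printf("err=%v\n%s", err, out)
}
```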

@@ -37,11 +37,13 @@ type (
	Compression Compression
	NoLchown bool
	ChownOpts *TarChownOptions
	Name string
	IncludeSourceDir bool
	// When unpacking, specifies whether overwriting a directory with a
	// non-directory is allowed and vice versa.
	NoOverwriteDirNonDir bool
	// For each include when creating an archive, the included name will be
	// replaced with the matching name from this map.
	RebaseNames map[string]string
}

// Archiver allows the reuse of most utility functions of this package
@@ -454,8 +456,9 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)

		seen := make(map[string]bool)

		var renamedRelFilePath string // For when tar.Options.Name is set
		for _, include := range options.IncludeFiles {
			rebaseName := options.RebaseNames[include]

			// We can't use filepath.Join(srcPath, include) because this will
			// clean away a trailing "." or "/" which may be important.
			walkRoot := strings.Join([]string{srcPath, include}, string(filepath.Separator))
@@ -503,14 +506,17 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
			}
			seen[relFilePath] = true

			// TODO Windows: Verify if this needs to be os.Pathseparator
			// Rename the base resource
			if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) {
				renamedRelFilePath = relFilePath
			}
			// Set this to make sure the items underneath also get renamed
			if options.Name != "" {
				relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1)
			// Rename the base resource.
			if rebaseName != "" {
				var replacement string
				if rebaseName != string(filepath.Separator) {
					// Special case the root directory to replace with an
					// empty string instead so that we don't end up with
					// double slashes in the paths.
					replacement = rebaseName
				}

				relFilePath = strings.Replace(relFilePath, include, replacement, 1)
			}

			if err := ta.addTarFile(filePath, relFilePath); err != nil {
@@ -633,8 +639,20 @@ loop:
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(archive io.Reader, dest string, options *TarOptions) error {
	if archive == nil {
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
	return untarHandler(tarArchive, dest, options, true)
}

// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
	return untarHandler(tarArchive, dest, options, false)
}

// Handler for teasing out the automatic decompression
func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
	if tarArchive == nil {
		return fmt.Errorf("Empty archive")
	}
	dest = filepath.Clean(dest)
@@ -644,12 +662,18 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error {
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}
	decompressedArchive, err := DecompressStream(archive)
	if err != nil {
		return err

	var r io.Reader = tarArchive
	if decompress {
		decompressedArchive, err := DecompressStream(tarArchive)
		if err != nil {
			return err
		}
		defer decompressedArchive.Close()
		r = decompressedArchive
	}
	defer decompressedArchive.Close()
	return Unpack(decompressedArchive, dest, options)

	return Unpack(r, dest, options)
}
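
The refactor hides decompression behind a flag so callers holding an already-decompressed stream can skip the probe. A sketch of how the two entry points would be called — the file names and destination are placeholders, and the import path is the one used in this tree:

```go
package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Compressed (or identity) stream: Untar sniffs and decompresses.
	f, err := os.Open("layer.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if err := archive.Untar(f, "/tmp/dest", &archive.TarOptions{}); err != nil {
		log.Fatal(err)
	}

	// Already-decompressed stream: skip the sniffing/decompression step.
	raw, err := os.Open("layer.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer raw.Close()
	if err := archive.UntarUncompressed(raw, "/tmp/dest", &archive.TarOptions{}); err != nil {
		log.Fatal(err)
	}
}
```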

func (archiver *Archiver) TarUntar(src, dst string) error {

@@ -695,7 +695,7 @@ func TestTarWithOptions(t *testing.T) {
		{&TarOptions{ExcludePatterns: []string{"2"}}, 1},
		{&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2},
		{&TarOptions{IncludeFiles: []string{"1", "1"}}, 2},
		{&TarOptions{Name: "test", IncludeFiles: []string{"1"}}, 4},
		{&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4},
	}
	for _, testCase := range cases {
		changes, err := tarUntar(t, origin, testCase.opts)

@@ -6,7 +6,6 @@ import (
	"io"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"strings"

@@ -64,34 +63,33 @@ func SpecifiesCurrentDir(path string) bool {
	return filepath.Base(path) == "."
}

// SplitPathDirEntry splits the given path between its
// parent directory and its basename in that directory.
func SplitPathDirEntry(localizedPath string) (dir, base string) {
	normalizedPath := filepath.ToSlash(localizedPath)
	vol := filepath.VolumeName(normalizedPath)
	normalizedPath = normalizedPath[len(vol):]
// SplitPathDirEntry splits the given path between its directory name and its
// basename by first cleaning the path, but preserving a trailing "." if the
// original path specified the current directory.
func SplitPathDirEntry(path string) (dir, base string) {
	cleanedPath := filepath.Clean(path)

	if normalizedPath == "/" {
		// Specifies the root path.
		return filepath.FromSlash(vol + normalizedPath), "."
	if SpecifiesCurrentDir(path) {
		cleanedPath += string(filepath.Separator) + "."
	}

	trimmedPath := vol + strings.TrimRight(normalizedPath, "/")

	dir = filepath.FromSlash(path.Dir(trimmedPath))
	base = filepath.FromSlash(path.Base(trimmedPath))

	return dir, base
	return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
}
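
A standalone sketch of the new behavior, assuming `SpecifiesCurrentDir` reduces to `filepath.Base(path) == "."` as shown earlier: cleaning normalizes the path, but an explicit current-directory suffix survives as the base entry:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// splitPathDirEntry is a sketch of the new SplitPathDirEntry: clean the
// path, but keep an explicit "." entry when the original path specified
// the current directory (e.g. "dir/.").
func splitPathDirEntry(path string) (dir, base string) {
	cleanedPath := filepath.Clean(path)
	if filepath.Base(path) == "." {
		cleanedPath += string(filepath.Separator) + "."
	}
	return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
}

func main() {
	for _, p := range []string{"/a/b/c", "/a/b/c/", "/a/b/.", "/"} {
		dir, base := splitPathDirEntry(p)
		fmt.Printf("%-10q -> dir=%q base=%q\n", p, dir, base)
	}
}
```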

// TarResource archives the resource at the given sourcePath into a Tar
// TarResource archives the resource described by the given CopyInfo to a Tar
// archive. A non-nil error is returned if sourcePath does not exist or is
// asserted to be a directory but exists as another type of file.
//
// This function acts as a convenient wrapper around TarWithOptions, which
// requires a directory as the source path. TarResource accepts either a
// directory or a file path and correctly sets the Tar options.
func TarResource(sourcePath string) (content Archive, err error) {
func TarResource(sourceInfo CopyInfo) (content Archive, err error) {
	return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
}

// TarResourceRebase is like TarResource but renames the first path element of
// items in the resulting tar archive to match the given rebaseName if not "".
func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) {
	if _, err = os.Lstat(sourcePath); err != nil {
		// Catches the case where the source does not exist or is not a
		// directory if asserted to be a directory, as this also causes an
@@ -99,22 +97,6 @@ func TarResource(sourcePath string) (content Archive, err error) {
		return
	}

	if len(sourcePath) > 1 && HasTrailingPathSeparator(sourcePath) {
		// In the case where the source path is a symbolic link AND it ends
		// with a path separator, we will want to evaluate the symbolic link.
		trimmedPath := sourcePath[:len(sourcePath)-1]
		stat, err := os.Lstat(trimmedPath)
		if err != nil {
			return nil, err
		}

		if stat.Mode()&os.ModeSymlink != 0 {
			if sourcePath, err = filepath.EvalSymlinks(trimmedPath); err != nil {
				return nil, err
			}
		}
	}

	// Separate the source path between its directory and
	// the entry in that directory which we are archiving.
	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
@@ -127,32 +109,137 @@ func TarResource(sourcePath string) (content Archive, err error) {
		Compression:      Uncompressed,
		IncludeFiles:     filter,
		IncludeSourceDir: true,
		RebaseNames: map[string]string{
			sourceBase: rebaseName,
		},
	})
}

// CopyInfo holds basic info about the source
// or destination path of a copy operation.
type CopyInfo struct {
	Path   string
	Exists bool
	IsDir  bool
	Path       string
	Exists     bool
	IsDir      bool
	RebaseName string
}

// CopyInfoStatPath stats the given path to create a CopyInfo
// struct representing that resource. If mustExist is true, then
// it is an error if there is no file or directory at the given path.
func CopyInfoStatPath(path string, mustExist bool) (CopyInfo, error) {
	pathInfo := CopyInfo{Path: path}
// CopyInfoSourcePath stats the given path to create a CopyInfo
// struct representing that resource for the source of an archive copy
// operation. The given path should be an absolute local path. A source path
// has all symlinks evaluated that appear before the last path separator ("/"
// on Unix). As it is to be a copy source, the path must exist.
func CopyInfoSourcePath(path string) (CopyInfo, error) {
	// Split the given path into its Directory and Base components. We will
	// evaluate symlinks in the directory component then append the base.
	dirPath, basePath := filepath.Split(path)

	fileInfo, err := os.Lstat(path)

	if err == nil {
		pathInfo.Exists, pathInfo.IsDir = true, fileInfo.IsDir()
	} else if os.IsNotExist(err) && !mustExist {
		err = nil
	resolvedDirPath, err := filepath.EvalSymlinks(dirPath)
	if err != nil {
		return CopyInfo{}, err
	}

	return pathInfo, err
	// resolvedDirPath will have been cleaned (no trailing path separators) so
	// we can manually join it with the base path element.
	resolvedPath := resolvedDirPath + string(filepath.Separator) + basePath

	var rebaseName string
	if HasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
		// In the case where the path had a trailing separator and a symlink
		// evaluation has changed the last path component, we will need to
		// rebase the name in the archive that is being copied to match the
		// originally requested name.
		rebaseName = filepath.Base(path)
	}

	stat, err := os.Lstat(resolvedPath)
	if err != nil {
		return CopyInfo{}, err
	}

	return CopyInfo{
		Path:       resolvedPath,
		Exists:     true,
		IsDir:      stat.IsDir(),
		RebaseName: rebaseName,
	}, nil
}
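
The key idea is that only the directory component is symlink-evaluated, so the base name the caller asked for is preserved. A self-contained sketch of that split, evaluate, and rejoin step — the temp-dir layout is invented for illustration:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
)

func main() {
	tmp, err := ioutil.TempDir("", "copyinfo-")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(tmp)

	// real/file.txt plus a symlink "link" -> "real".
	os.MkdirAll(filepath.Join(tmp, "real"), 0755)
	ioutil.WriteFile(filepath.Join(tmp, "real", "file.txt"), []byte("hi"), 0644)
	os.Symlink("real", filepath.Join(tmp, "link"))

	// As in CopyInfoSourcePath: split off the base, evaluate symlinks in
	// the directory part only, then re-attach the base untouched.
	src := filepath.Join(tmp, "link", "file.txt")
	dirPath, basePath := filepath.Split(src)
	resolvedDir, err := filepath.EvalSymlinks(dirPath)
	if err != nil {
		log.Fatal(err)
	}
	resolved := resolvedDir + string(filepath.Separator) + basePath
	fmt.Println(resolved) // .../real/file.txt — the link in the middle is gone
}
```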

// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation. The given path should be an absolute local path.
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
	originalPath := path

	stat, err := os.Lstat(path)

	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
		// The path exists and is not a symlink.
		return CopyInfo{
			Path:   path,
			Exists: true,
			IsDir:  stat.IsDir(),
		}, nil
	}

	// While the path is a symlink.
	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
		if n > maxSymlinkIter {
			// Don't follow symlinks more than this arbitrary number of times.
			return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
		}

		// The path is a symbolic link. We need to evaluate it so that the
		// destination of the copy operation is the link target and not the
		// link itself. This is notably different than CopyInfoSourcePath which
		// only evaluates symlinks before the last appearing path separator.
		// Also note that it is okay if the last path element is a broken
		// symlink as the copy operation should create the target.
		var linkTarget string

		linkTarget, err = os.Readlink(path)
		if err != nil {
			return CopyInfo{}, err
		}

		if !filepath.IsAbs(linkTarget) {
			// Join with the parent directory.
			dstParent, _ := SplitPathDirEntry(path)
			linkTarget = filepath.Join(dstParent, linkTarget)
		}

		path = linkTarget
		stat, err = os.Lstat(path)
	}

	if err != nil {
		// It's okay if the destination path doesn't exist. We can still
		// continue the copy operation if the parent directory exists.
		if !os.IsNotExist(err) {
			return CopyInfo{}, err
		}

		// Ensure destination parent dir exists.
		dstParent, _ := SplitPathDirEntry(path)

		parentDirStat, err := os.Lstat(dstParent)
		if err != nil {
			return CopyInfo{}, err
		}
		if !parentDirStat.IsDir() {
			return CopyInfo{}, ErrNotDirectory
		}

		return CopyInfo{Path: path}, nil
	}

	// The path exists after resolving symlinks.
	return CopyInfo{
		Path:   path,
		Exists: true,
		IsDir:  stat.IsDir(),
	}, nil
}
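
A simplified sketch of the bounded loop. It hedges in one place: non-IsNotExist stat errors are swallowed here for brevity, whereas the real function propagates them. The point it demonstrates is that a broken final link still resolves to a usable destination path:

```go
package main

import (
	"errors"
	"fmt"
	"log"
	"os"
	"path/filepath"
)

// resolveDestination follows symlinks one hop at a time, bounded, so that
// a final broken link is still usable as a destination (the copy will
// create its target).
func resolveDestination(path string) (string, error) {
	const maxSymlinkIter = 10
	for n := 0; ; n++ {
		if n > maxSymlinkIter {
			return "", errors.New("too many symlinks in " + path)
		}
		stat, err := os.Lstat(path)
		if err != nil || stat.Mode()&os.ModeSymlink == 0 {
			// Not a symlink (or it doesn't exist): stop here.
			return path, nil
		}
		target, err := os.Readlink(path)
		if err != nil {
			return "", err
		}
		if !filepath.IsAbs(target) {
			target = filepath.Join(filepath.Dir(path), target)
		}
		path = target
	}
}

func main() {
	os.Symlink("/tmp/does-not-exist", "/tmp/broken-link") // error ignored if it already exists
	dst, err := resolveDestination("/tmp/broken-link")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(dst) // /tmp/does-not-exist — a valid copy destination
}
```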

// PrepareArchiveCopy prepares the given srcContent archive, which should
@@ -210,6 +297,13 @@ func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (ds
// rebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
func rebaseArchiveEntries(srcContent ArchiveReader, oldBase, newBase string) Archive {
	if oldBase == "/" {
		// If oldBase specifies the root directory, use an empty string as
		// oldBase instead so that newBase doesn't replace the path separator
		// that all paths will start with.
		oldBase = ""
	}

	rebased, w := io.Pipe()

	go func() {
@@ -259,11 +353,11 @@ func CopyResource(srcPath, dstPath string) error {
	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)

	if srcInfo, err = CopyInfoStatPath(srcPath, true); err != nil {
	if srcInfo, err = CopyInfoSourcePath(srcPath); err != nil {
		return err
	}

	content, err := TarResource(srcPath)
	content, err := TarResource(srcInfo)
	if err != nil {
		return err
	}
@@ -275,24 +369,13 @@ func CopyResource(srcPath, dstPath string) error {

// CopyTo handles extracting the given content whose
// entries should be sourced from srcInfo to dstPath.
func CopyTo(content ArchiveReader, srcInfo CopyInfo, dstPath string) error {
	dstInfo, err := CopyInfoStatPath(dstPath, false)
	// The destination path need not exist, but CopyInfoDestinationPath will
	// ensure that at least the parent directory exists.
	dstInfo, err := CopyInfoDestinationPath(dstPath)
	if err != nil {
		return err
	}

	if !dstInfo.Exists {
		// Ensure destination parent dir exists.
		dstParent, _ := SplitPathDirEntry(dstPath)

		dstStat, err := os.Lstat(dstParent)
		if err != nil {
			return err
		}
		if !dstStat.IsDir() {
			return ErrNotDirectory
		}
	}

	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
	if err != nil {
		return err

@@ -138,13 +138,7 @@ func TestCopyErrSrcNotExists(t *testing.T) {
	tmpDirA, tmpDirB := getTestTempDirs(t)
	defer removeAllPaths(tmpDirA, tmpDirB)

	content, err := TarResource(filepath.Join(tmpDirA, "file1"))
	if err == nil {
		content.Close()
		t.Fatal("expected IsNotExist error, but got nil instead")
	}

	if !os.IsNotExist(err) {
	if _, err := CopyInfoSourcePath(filepath.Join(tmpDirA, "file1")); !os.IsNotExist(err) {
		t.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
	}
}
@@ -158,13 +152,7 @@ func TestCopyErrSrcNotDir(t *testing.T) {
	// Load A with some sample files and directories.
	createSampleDir(t, tmpDirA)

	content, err := TarResource(joinTrailingSep(tmpDirA, "file1"))
	if err == nil {
		content.Close()
		t.Fatal("expected IsNotDir error, but got nil instead")
	}

	if !isNotDir(err) {
	if _, err := CopyInfoSourcePath(joinTrailingSep(tmpDirA, "file1")); !isNotDir(err) {
		t.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
	}
}
@@ -181,7 +169,7 @@ func TestCopyErrDstParentNotExists(t *testing.T) {
	srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false}

	// Try with a file source.
	content, err := TarResource(srcInfo.Path)
	content, err := TarResource(srcInfo)
	if err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}
@@ -199,7 +187,7 @@ func TestCopyErrDstParentNotExists(t *testing.T) {
	// Try with a directory source.
	srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true}

	content, err = TarResource(srcInfo.Path)
	content, err = TarResource(srcInfo)
	if err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}
@@ -228,7 +216,7 @@ func TestCopyErrDstNotDir(t *testing.T) {
	// Try with a file source.
	srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false}

	content, err := TarResource(srcInfo.Path)
	content, err := TarResource(srcInfo)
	if err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}
@@ -245,7 +233,7 @@ func TestCopyErrDstNotDir(t *testing.T) {
	// Try with a directory source.
	srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true}

	content, err = TarResource(srcInfo.Path)
	content, err = TarResource(srcInfo)
	if err != nil {
		t.Fatalf("unexpected error %T: %s", err, err)
	}

@@ -173,10 +173,24 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
	return size, nil
}

// ApplyLayer parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the
// contents of the layer.
// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer ArchiveReader) (int64, error) {
	return applyLayerHandler(dest, layer, true)
}

// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer ArchiveReader) (int64, error) {
	return applyLayerHandler(dest, layer, false)
}

// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
func applyLayerHandler(dest string, layer ArchiveReader, decompress bool) (int64, error) {
	dest = filepath.Clean(dest)

	// We need to be able to set any perms
@@ -186,9 +200,11 @@ func ApplyLayer(dest string, layer ArchiveReader) (int64, error) {
	}
	defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform

	layer, err = DecompressStream(layer)
	if err != nil {
		return 0, err
	if decompress {
		layer, err = DecompressStream(layer)
		if err != nil {
			return 0, err
		}
	}
	return UnpackLayer(dest, layer)
}
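
Usage of the two variants might look as follows — the paths are placeholders and the import path is the one used in this tree. `ApplyLayer` probes and decompresses; `ApplyUncompressedLayer` trusts the stream as plain tar:

```go
package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// A gzipped layer diff: ApplyLayer will sniff and decompress it.
	layer, err := os.Open("layer.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()

	size, err := archive.ApplyLayer("/var/lib/demo/rootfs", layer)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("applied %d bytes", size)

	// If the stream is already a plain tar, skip the decompression probe.
	raw, err := os.Open("layer.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer raw.Close()
	if _, err := archive.ApplyUncompressedLayer("/var/lib/demo/rootfs", raw); err != nil {
		log.Fatal(err)
	}
}
```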

@@ -3,6 +3,7 @@ package chrootarchive
import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

@@ -17,6 +18,18 @@ var chrootArchiver = &archive.Archiver{Untar: Untar}
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
	return untarHandler(tarArchive, dest, options, true)
}

// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
	return untarHandler(tarArchive, dest, options, false)
}

// Handler for teasing out the automatic decompression
func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error {

	if tarArchive == nil {
		return fmt.Errorf("Empty archive")
@@ -35,13 +48,17 @@ func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error
		}
	}

	decompressedArchive, err := archive.DecompressStream(tarArchive)
	if err != nil {
		return err
	r := ioutil.NopCloser(tarArchive)
	if decompress {
		decompressedArchive, err := archive.DecompressStream(tarArchive)
		if err != nil {
			return err
		}
		defer decompressedArchive.Close()
		r = decompressedArchive
	}
	defer decompressedArchive.Close()

	return invokeUnpack(decompressedArchive, dest, options)
	return invokeUnpack(r, dest, options)
}

// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.

@@ -49,7 +49,7 @@ func untar() {
	os.Exit(0)
}

func invokeUnpack(decompressedArchive io.ReadCloser, dest string, options *archive.TarOptions) error {
func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error {

	// We can't pass a potentially large exclude list directly via cmd line
	// because we easily overrun the kernel's max argument/environment size

@@ -65,20 +65,36 @@ func applyLayer() {
	os.Exit(0)
}

// ApplyLayer parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the
// contents of the layer.
// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer archive.ArchiveReader) (size int64, err error) {
	return applyLayerHandler(dest, layer, true)
}

// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer archive.ArchiveReader) (int64, error) {
	return applyLayerHandler(dest, layer, false)
}

func applyLayerHandler(dest string, layer archive.ArchiveReader, decompress bool) (size int64, err error) {
	dest = filepath.Clean(dest)
	decompressed, err := archive.DecompressStream(layer)
	if err != nil {
		return 0, err
	if decompress {
		decompressed, err := archive.DecompressStream(layer)
		if err != nil {
			return 0, err
		}
		defer decompressed.Close()

		layer = decompressed
	}

	defer decompressed.Close()

	cmd := reexec.Command("docker-applyLayer", dest)
	cmd.Stdin = decompressed
	cmd.Stdin = layer

	outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
	cmd.Stdout, cmd.Stderr = outBuf, errBuf

@@ -72,10 +72,10 @@ func certPool(caFile string) (*x509.CertPool, error) {
	certPool := x509.NewCertPool()
	pem, err := ioutil.ReadFile(caFile)
	if err != nil {
		return nil, fmt.Errorf("Could not read CA certificate %s: %v", caFile, err)
		return nil, fmt.Errorf("Could not read CA certificate %q: %v", caFile, err)
	}
	if !certPool.AppendCertsFromPEM(pem) {
		return nil, fmt.Errorf("failed to append certificates from PEM file: %s", caFile)
		return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
	}
	s := certPool.Subjects()
	subjects := make([]string, len(s))
@@ -116,9 +116,9 @@ func Server(options Options) (*tls.Config, error) {
	tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, fmt.Errorf("Could not load X509 key pair (%s, %s): %v", options.CertFile, options.KeyFile, err)
			return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err)
		}
		return nil, fmt.Errorf("Error reading X509 key pair (%s, %s): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err)
		return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err)
	}
	tlsConfig.Certificates = []tls.Certificate{tlsCert}
	if options.ClientAuth >= tls.VerifyClientCertIfGiven {

@@ -111,8 +111,6 @@ func (idx *TruncIndex) Get(s string) (string, error) {

// Iterates over all stored IDs, and passes each of them to the given handler
func (idx *TruncIndex) Iterate(handler func(id string)) {
	idx.RLock()
	defer idx.RUnlock()
	idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
		handler(string(prefix))
		return nil

@@ -13,7 +13,6 @@ import (
	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution/registry/api/v2"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/pkg/tlsconfig"
)

// for mocking in unit tests
@@ -45,10 +44,11 @@ func scanForAPIVersion(address string) (string, APIVersion) {

// NewEndpoint parses the given address to return a registry endpoint.
func NewEndpoint(index *IndexInfo, metaHeaders http.Header) (*Endpoint, error) {
	// *TODO: Allow per-registry configuration of endpoints.
	tlsConfig := tlsconfig.ServerDefault
	tlsConfig.InsecureSkipVerify = !index.Secure
	endpoint, err := newEndpoint(index.GetAuthConfigKey(), &tlsConfig, metaHeaders)
	tlsConfig, err := newTLSConfig(index.Name, index.Secure)
	if err != nil {
		return nil, err
	}
	endpoint, err := newEndpoint(index.GetAuthConfigKey(), tlsConfig, metaHeaders)
	if err != nil {
		return nil, err
	}

@@ -17,6 +17,7 @@ import (
	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/distribution/registry/api/v2"
	"github.com/docker/distribution/registry/client"
	"github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/autogen/dockerversion"
	"github.com/docker/docker/pkg/parsers/kernel"
@@ -49,6 +50,23 @@ func init() {
	dockerUserAgent = useragent.AppendVersions("", httpVersion...)
}

func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) {
	// PreferredServerCipherSuites should have no effect
	tlsConfig := tlsconfig.ServerDefault

	tlsConfig.InsecureSkipVerify = !isSecure

	if isSecure {
		hostDir := filepath.Join(CertsDir, hostname)
		logrus.Debugf("hostDir: %s", hostDir)
		if err := ReadCertsDirectory(&tlsConfig, hostDir); err != nil {
			return nil, err
		}
	}

	return &tlsConfig, nil
}
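
`CertsDir` and `ReadCertsDirectory` are internal to this package; a simplified, self-contained sketch of the same shape — baseline config, verification disabled only for insecure registries, `.crt` files loaded from an assumed `/etc/docker/certs.d/<host>` directory — could look like this:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"path/filepath"
)

// newRegistryTLSConfig is a sketch, not the package's implementation:
// it shows the per-host CA-loading pattern under stated assumptions.
func newRegistryTLSConfig(hostname string, isSecure bool) (*tls.Config, error) {
	cfg := &tls.Config{
		MinVersion:         tls.VersionTLS10,
		InsecureSkipVerify: !isSecure,
	}
	if !isSecure {
		return cfg, nil
	}

	hostDir := filepath.Join("/etc/docker/certs.d", hostname) // assumed location
	pool := x509.NewCertPool()
	files, err := ioutil.ReadDir(hostDir)
	if err != nil {
		// No per-host directory: fall back to system roots.
		return cfg, nil
	}
	for _, f := range files {
		if filepath.Ext(f.Name()) == ".crt" {
			pem, err := ioutil.ReadFile(filepath.Join(hostDir, f.Name()))
			if err != nil {
				return nil, err
			}
			if !pool.AppendCertsFromPEM(pem) {
				return nil, fmt.Errorf("invalid CA certificate in %s", hostDir)
			}
			cfg.RootCAs = pool
		}
	}
	return cfg, nil
}

func main() {
	cfg, _ := newRegistryTLSConfig("myregistry.example.com:5000", true)
	fmt.Println("InsecureSkipVerify:", cfg.InsecureSkipVerify)
}
```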

func hasFile(files []os.FileInfo, name string) bool {
	for _, f := range files {
		if f.Name() == name {
@@ -194,8 +212,14 @@ func ContinueOnError(err error) bool {
		return ContinueOnError(v.Err)
	case errcode.Error:
		return shouldV2Fallback(v)
	case *client.UnexpectedHTTPResponseError:
		return true
	}
	return false
	// let's be nice and fall back if the error is a completely
	// unexpected one.
	// If new errors have to be handled in some way, please
	// add them to the switch above.
	return true
}

// NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the

@@ -5,10 +5,8 @@ import (
	"fmt"
	"net/http"
	"net/url"
	"path/filepath"
	"strings"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/docker/cliconfig"
	"github.com/docker/docker/pkg/tlsconfig"
@@ -99,22 +97,7 @@ func (e APIEndpoint) ToV1Endpoint(metaHeaders http.Header) (*Endpoint, error) {

// TLSConfig constructs a client TLS configuration based on server defaults
func (s *Service) TLSConfig(hostname string) (*tls.Config, error) {
	// PreferredServerCipherSuites should have no effect
	tlsConfig := tlsconfig.ServerDefault

	isSecure := s.Config.isSecureIndex(hostname)

	tlsConfig.InsecureSkipVerify = !isSecure

	if isSecure {
		hostDir := filepath.Join(CertsDir, hostname)
		logrus.Debugf("hostDir: %s", hostDir)
		if err := ReadCertsDirectory(&tlsConfig, hostDir); err != nil {
			return nil, err
		}
	}

	return &tlsConfig, nil
	return newTLSConfig(hostname, s.Config.isSecureIndex(hostname))
}

func (s *Service) tlsConfigForMirror(mirror string) (*tls.Config, error) {

@@ -231,9 +231,9 @@ type HostConfig struct {
	CpusetCpus       string // CpusetCpus 0-2, 0,1
	CpusetMems       string // CpusetMems 0-2, 0,1
	CpuQuota         int64
	BlkioWeight      int64  // Block IO weight (relative weight vs. other containers)
	OomKillDisable   bool   // Whether to disable OOM Killer or not
	MemorySwappiness int64  // Tuning container memory swappiness behaviour
	BlkioWeight      int64  // Block IO weight (relative weight vs. other containers)
	OomKillDisable   bool   // Whether to disable OOM Killer or not
	MemorySwappiness *int64 // Tuning container memory swappiness behaviour
	Privileged       bool
	PortBindings     nat.PortMap
	Links            []string
@@ -351,7 +351,7 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe
		CpuQuota:         *flCpuQuota,
		BlkioWeight:      *flBlkioWeight,
		OomKillDisable:   *flOomKillDisable,
		MemorySwappiness: swappiness,
		MemorySwappiness: flSwappiness,
		Privileged:       *flPrivileged,
		PortBindings:     portBindings,
		Links:            flLinks.GetAll(),
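
Changing `MemorySwappiness` from `int64` to `*int64` lets the API distinguish "flag not set" (nil) from an explicit value of 0, which is itself a meaningful swappiness setting. A minimal sketch with a stand-in struct:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type hostConfig struct {
	// A *int64 can distinguish "not set" (nil) from an explicit 0,
	// which a plain int64 cannot: 0 is a valid swappiness value.
	MemorySwappiness *int64 `json:"MemorySwappiness,omitempty"`
}

func main() {
	var unset hostConfig
	zero := int64(0)
	explicit := hostConfig{MemorySwappiness: &zero}

	a, _ := json.Marshal(unset)
	b, _ := json.Marshal(explicit)
	fmt.Println(string(a)) // {}
	fmt.Println(string(b)) // {"MemorySwappiness":0}
}
```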

@@ -10,6 +10,7 @@ ENV DOCKER_BUILDTAGS include_rados

WORKDIR $DISTRIBUTION_DIR
COPY . $DISTRIBUTION_DIR
COPY cmd/registry/config-dev.yml $DISTRIBUTION_DIR/cmd/registry/config.yml
RUN make PREFIX=/go clean binaries

VOLUME ["/var/lib/registry"]

@@ -27,6 +27,9 @@ var (
	// ErrBlobInvalidLength returned when the blob has an expected length on
	// commit, meaning mismatched with the descriptor or an invalid value.
	ErrBlobInvalidLength = errors.New("blob invalid length")

	// ErrUnsupported returned when an unsupported operation is attempted
	ErrUnsupported = errors.New("unsupported operation")
)

// ErrBlobInvalidDigest returned when digest check fails.
@@ -70,6 +73,11 @@ type BlobStatter interface {
	Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
}

// BlobDeleter enables deleting blobs from storage.
type BlobDeleter interface {
	Delete(ctx context.Context, dgst digest.Digest) error
}

// BlobDescriptorService manages metadata about a blob by digest. Most
// implementations will not expose such an interface explicitly. Such mappings
// should be maintained by interacting with the BlobIngester. Hence, this is
@@ -87,6 +95,9 @@ type BlobDescriptorService interface {
	// the restriction that the algorithm of the descriptor must match the
	// canonical algorithm (ie sha256) of the annotator.
	SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error

	// Clear enables descriptors to be unlinked
	Clear(ctx context.Context, dgst digest.Digest) error
}

// ReadSeekCloser is the primary reader type for blob data, combining
@@ -183,8 +194,9 @@ type BlobService interface {
}

// BlobStore represents the entire suite of blob related operations. Such an
// implementation can access, read, write and serve blobs.
// implementation can access, read, write, delete and serve blobs.
type BlobStore interface {
	BlobService
	BlobServer
	BlobDeleter
}

@@ -1,6 +1,8 @@
package context

import (
	"sync"

	"github.com/docker/distribution/uuid"
	"golang.org/x/net/context"
)
@@ -14,11 +16,19 @@ type Context interface {
// provided as the main background context.
type instanceContext struct {
	Context
	id string // id of context, logged as "instance.id"
	id   string    // id of context, logged as "instance.id"
	once sync.Once // once protects generation of the id
}

func (ic *instanceContext) Value(key interface{}) interface{} {
	if key == "instance.id" {
		ic.once.Do(func() {
			// We want to lazy initialize the UUID such that we don't
			// call a random generator from the package initialization
			// code. For various reasons random may not be available
			// (https://github.com/docker/distribution/issues/782).
			ic.id = uuid.Generate().String()
		})
		return ic.id
	}
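
The `sync.Once` guard means the UUID is generated lazily and exactly once, even with concurrent `Value` callers. The same pattern in isolation — the generated string here is a stand-in for `uuid.Generate().String()`:

```go
package main

import (
	"fmt"
	"sync"
)

type lazyID struct {
	once sync.Once
	id   string
}

// ID generates the identifier on first use only, exactly once even under
// concurrent callers — the pattern instanceContext.Value uses to avoid
// running a random generator during package initialization.
func (l *lazyID) ID() string {
	l.once.Do(func() {
		l.id = "generated-on-first-use" // stand-in for uuid.Generate().String()
	})
	return l.id
}

func main() {
	var l lazyID
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(l.ID()) // all goroutines observe the same value
		}()
	}
	wg.Wait()
}
```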

@@ -27,7 +37,6 @@ func (ic *instanceContext) Value(key interface{}) interface{} {

var background = &instanceContext{
	Context: context.Background(),
	id:      uuid.Generate().String(),
}

// Background returns a non-nil, empty Context. The background context

@@ -3,8 +3,6 @@ package context
import (
	"fmt"

	"github.com/docker/distribution/uuid"

	"github.com/Sirupsen/logrus"
)

@@ -101,8 +99,3 @@ func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry {

	return logger.WithFields(fields)
}

func init() {
	// inject a logger into the uuid library.
	uuid.Loggerf = GetLogger(Background()).Warnf
}

@@ -398,6 +398,8 @@ var routeDescriptors = []RouteDescriptor{
			Description: "Fetch the tags under the repository identified by `name`.",
			Requests: []RequestDescriptor{
				{
					Name:        "Tags",
					Description: "Return all tags for the repository",
					Headers: []ParameterDescriptor{
						hostHeader,
						authHeader,
@@ -455,6 +457,7 @@ var routeDescriptors = []RouteDescriptor{
					},
				},
				{
					Name:            "Tags Paginated",
					Description:     "Return a portion of the tags for the specified repository.",
					PathParameters:  []ParameterDescriptor{nameParameterDescriptor},
					QueryParameters: paginationParameters,
@@ -483,6 +486,30 @@ var routeDescriptors = []RouteDescriptor{
							},
						},
					},
					Failures: []ResponseDescriptor{
						{
							StatusCode:  http.StatusNotFound,
							Description: "The repository is not known to the registry.",
							Body: BodyDescriptor{
								ContentType: "application/json; charset=utf-8",
								Format:      errorsBody,
							},
							ErrorCodes: []errcode.ErrorCode{
								ErrorCodeNameUnknown,
							},
						},
						{
							StatusCode:  http.StatusUnauthorized,
							Description: "The client does not have access to the repository.",
							Body: BodyDescriptor{
								ContentType: "application/json; charset=utf-8",
								Format:      errorsBody,
							},
							ErrorCodes: []errcode.ErrorCode{
								ErrorCodeUnauthorized,
							},
						},
					},
				},
			},
		},
	},
@@ -580,7 +607,7 @@ var routeDescriptors = []RouteDescriptor{
				Successes: []ResponseDescriptor{
					{
						Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.",
						StatusCode:  http.StatusAccepted,
						StatusCode:  http.StatusCreated,
						Headers: []ParameterDescriptor{
							{
								Name: "Location",

@@ -10,6 +10,7 @@ import (
	"sync"
	"time"

	"github.com/docker/distribution/registry/client"
	"github.com/docker/distribution/registry/client/transport"
)

@@ -209,7 +210,7 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token string, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
	if !client.SuccessStatus(resp.StatusCode) {
		return "", fmt.Errorf("token auth attempt for registry: %s request failed with status: %d %s", req.URL, resp.StatusCode, http.StatusText(resp.StatusCode))
	}

@@ -44,7 +44,7 @@ func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) {
		return 0, err
	}

	if resp.StatusCode != http.StatusAccepted {
	if !SuccessStatus(resp.StatusCode) {
		return 0, hbu.handleErrorResponse(resp)
	}

@@ -79,7 +79,7 @@ func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) {
		return 0, err
	}

	if resp.StatusCode != http.StatusAccepted {
	if !SuccessStatus(resp.StatusCode) {
		return 0, hbu.handleErrorResponse(resp)
	}

@@ -142,7 +142,7 @@ func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descrip
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusCreated {
	if !SuccessStatus(resp.StatusCode) {
		return distribution.Descriptor{}, hbu.handleErrorResponse(resp)
	}

@@ -160,12 +160,10 @@ func (hbu *httpBlobUpload) Cancel(ctx context.Context) error {
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusNoContent, http.StatusNotFound:
	if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) {
		return nil
	default:
		return hbu.handleErrorResponse(resp)
	}
	return hbu.handleErrorResponse(resp)
}

func (hbu *httpBlobUpload) Close() error {

@@ -61,3 +61,9 @@ func handleErrorResponse(resp *http.Response) error {
	}
	return &UnexpectedHTTPStatusError{Status: resp.Status}
}

// SuccessStatus returns true if the argument is a successful HTTP response
// code (in the range 200 - 399 inclusive).
func SuccessStatus(status int) bool {
	return status >= 200 && status <= 399
}
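
Treating the whole 200–399 range as success is what lets the call sites below collapse their status switches: redirects and alternate 2xx codes no longer fall through to the error path. Mirrored in isolation:

```go
package main

import "fmt"

// successStatus mirrors the helper above: 2xx and 3xx both count as
// success, so a redirect or an unexpected-but-successful code is not
// treated as an error.
func successStatus(status int) bool {
	return status >= 200 && status <= 399
}

func main() {
	for _, code := range []int{200, 202, 301, 404, 500} {
		fmt.Printf("%d -> %v\n", code, successStatus(code))
	}
	// 200 -> true, 202 -> true, 301 -> true, 404 -> false, 500 -> false
}
```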

@@ -70,8 +70,7 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK:
	if SuccessStatus(resp.StatusCode) {
		var ctlg struct {
			Repositories []string `json:"repositories"`
		}
@@ -90,8 +89,7 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri
		if link == "" {
			returnErr = io.EOF
		}

	default:
	} else {
		return 0, handleErrorResponse(resp)
	}

@@ -199,8 +197,7 @@ func (ms *manifests) Tags() ([]string, error) {
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK:
	if SuccessStatus(resp.StatusCode) {
		b, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return nil, err
@@ -214,11 +211,10 @@ func (ms *manifests) Tags() ([]string, error) {
		}

		return tagsResponse.Tags, nil
	case http.StatusNotFound:
	} else if resp.StatusCode == http.StatusNotFound {
		return nil, nil
	default:
		return nil, handleErrorResponse(resp)
	}
	return nil, handleErrorResponse(resp)
}

func (ms *manifests) Exists(dgst digest.Digest) (bool, error) {
@@ -238,14 +234,12 @@ func (ms *manifests) ExistsByTag(tag string) (bool, error) {
|
||||
return false, err
|
||||
}
|
||||
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK:
|
||||
if SuccessStatus(resp.StatusCode) {
|
||||
return true, nil
|
||||
case http.StatusNotFound:
|
||||
} else if resp.StatusCode == http.StatusNotFound {
|
||||
return false, nil
|
||||
default:
|
||||
return false, handleErrorResponse(resp)
|
||||
}
|
||||
return false, handleErrorResponse(resp)
|
||||
}
|
||||
|
||||
func (ms *manifests) Get(dgst digest.Digest) (*manifest.SignedManifest, error) {
|
||||
@@ -254,13 +248,14 @@ func (ms *manifests) Get(dgst digest.Digest) (*manifest.SignedManifest, error) {
|
||||
return ms.GetByTag(dgst.String())
|
||||
}
|
||||
|
||||
// AddEtagToTag allows a client to supply an eTag to GetByTag which will
|
||||
// be used for a conditional HTTP request. If the eTag matches, a nil
|
||||
// manifest and nil error will be returned.
|
||||
func AddEtagToTag(tagName, dgst string) distribution.ManifestServiceOption {
|
||||
// AddEtagToTag allows a client to supply an eTag to GetByTag which will be
|
||||
// used for a conditional HTTP request. If the eTag matches, a nil manifest
|
||||
// and nil error will be returned. etag is automatically quoted when added to
|
||||
// this map.
|
||||
func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
|
||||
return func(ms distribution.ManifestService) error {
|
||||
if ms, ok := ms.(*manifests); ok {
|
||||
ms.etags[tagName] = dgst
|
||||
ms.etags[tag] = fmt.Sprintf(`"%s"`, etag)
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("etag options is a client-only option")
|
||||
@@ -293,8 +288,9 @@ func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServic
 	}
 	defer resp.Body.Close()

-	switch resp.StatusCode {
-	case http.StatusOK:
+	if resp.StatusCode == http.StatusNotModified {
+		return nil, nil
+	} else if SuccessStatus(resp.StatusCode) {
 		var sm manifest.SignedManifest
 		decoder := json.NewDecoder(resp.Body)

@@ -302,11 +298,8 @@ func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServic
 			return nil, err
 		}
 		return &sm, nil
-	case http.StatusNotModified:
-		return nil, nil
-	default:
-		return nil, handleErrorResponse(resp)
 	}
+	return nil, handleErrorResponse(resp)
 }

 func (ms *manifests) Put(m *manifest.SignedManifest) error {
@@ -328,13 +321,11 @@ func (ms *manifests) Put(m *manifest.SignedManifest) error {
 	}
 	defer resp.Body.Close()

-	switch resp.StatusCode {
-	case http.StatusAccepted:
+	if SuccessStatus(resp.StatusCode) {
 		// TODO(dmcgowan): make use of digest header
 		return nil
-	default:
-		return handleErrorResponse(resp)
 	}
+	return handleErrorResponse(resp)
 }

 func (ms *manifests) Delete(dgst digest.Digest) error {
@@ -353,12 +344,10 @@ func (ms *manifests) Delete(dgst digest.Digest) error {
 	}
 	defer resp.Body.Close()

-	switch resp.StatusCode {
-	case http.StatusOK:
+	if SuccessStatus(resp.StatusCode) {
 		return nil
-	default:
-		return handleErrorResponse(resp)
 	}
+	return handleErrorResponse(resp)
 }

 type blobs struct {
@@ -366,7 +355,8 @@ type blobs struct {
 	ub     *v2.URLBuilder
 	client *http.Client

-	statter distribution.BlobStatter
+	statter distribution.BlobDescriptorService
+	distribution.BlobDeleter
 }

 func sanitizeLocation(location, source string) (string, error) {
@@ -459,8 +449,7 @@ func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) {
 	}
 	defer resp.Body.Close()

-	switch resp.StatusCode {
-	case http.StatusAccepted:
+	if SuccessStatus(resp.StatusCode) {
 		// TODO(dmcgowan): Check for invalid UUID
 		uuid := resp.Header.Get("Docker-Upload-UUID")
 		location, err := sanitizeLocation(resp.Header.Get("Location"), u)
@@ -475,15 +464,18 @@ func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) {
 			startedAt: time.Now(),
 			location:  location,
 		}, nil
-	default:
-		return nil, handleErrorResponse(resp)
 	}
+	return nil, handleErrorResponse(resp)
 }

 func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
 	panic("not implemented")
 }

+func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error {
+	return bs.statter.Clear(ctx, dgst)
+}
+
 type blobStatter struct {
 	name   string
 	ub     *v2.URLBuilder
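The added `Delete` rounds out the client-side blob store by delegating to the statter's `Clear`, which (in a hunk further down) issues an HTTP `DELETE` against the blob URL. A usage sketch, under the assumption that the repository's blob store exposes the deleter; import paths follow this vendored tree:

```go
package client

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// deleteBlob is a usage sketch; repo and dgst come from elsewhere in the
// caller, and the blob store is assumed to expose the new Delete method.
// Unknown blobs surface as an error via handleErrorResponse.
func deleteBlob(ctx context.Context, repo distribution.Repository, dgst digest.Digest) error {
	return repo.Blobs(ctx).Delete(ctx, dgst)
}
```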
@@ -502,8 +494,7 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi
 	}
 	defer resp.Body.Close()

-	switch resp.StatusCode {
-	case http.StatusOK:
+	if SuccessStatus(resp.StatusCode) {
 		lengthHeader := resp.Header.Get("Content-Length")
 		length, err := strconv.ParseInt(lengthHeader, 10, 64)
 		if err != nil {
@@ -515,11 +506,10 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi
 			Size:   length,
 			Digest: dgst,
 		}, nil
-	case http.StatusNotFound:
+	} else if resp.StatusCode == http.StatusNotFound {
 		return distribution.Descriptor{}, distribution.ErrBlobUnknown
-	default:
-		return distribution.Descriptor{}, handleErrorResponse(resp)
 	}
+	return distribution.Descriptor{}, handleErrorResponse(resp)
 }

 func buildCatalogValues(maxEntries int, last string) url.Values {
@@ -535,3 +525,30 @@ func buildCatalogValues(maxEntries int, last string) url.Values {

 	return values
 }
+
+func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
+	blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst)
+	if err != nil {
+		return err
+	}
+
+	req, err := http.NewRequest("DELETE", blobURL, nil)
+	if err != nil {
+		return err
+	}
+
+	resp, err := bs.client.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if SuccessStatus(resp.StatusCode) {
+		return nil
+	}
+	return handleErrorResponse(resp)
+}
+
+func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+	return nil
+}
@@ -154,10 +154,11 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) {
 		return nil, err
 	}

-	switch {
-	case resp.StatusCode == 200:
+	// Normally would use client.SuccessStatus, but that would be a cyclic
+	// import
+	if resp.StatusCode >= 200 && resp.StatusCode <= 399 {
 		hrs.rc = resp.Body
-	default:
+	} else {
 		defer resp.Body.Close()
 		return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status)
 	}
@@ -26,13 +26,13 @@ type MetricsTracker interface {

 type cachedBlobStatter struct {
 	cache   distribution.BlobDescriptorService
-	backend distribution.BlobStatter
+	backend distribution.BlobDescriptorService
 	tracker MetricsTracker
 }

 // NewCachedBlobStatter creates a new statter which prefers a cache and
 // falls back to a backend.
-func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobStatter) distribution.BlobStatter {
+func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
 	return &cachedBlobStatter{
 		cache:   cache,
 		backend: backend,
@@ -41,7 +41,7 @@ func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend dist

 // NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and
 // falls back to a backend. Hits and misses will send to the tracker.
-func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobStatter, tracker MetricsTracker) distribution.BlobStatter {
+func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter {
 	return &cachedBlobStatter{
 		cache:   cache,
 		backend: backend,
@@ -77,4 +77,25 @@ fallback:
 	}

 	return desc, err

 }

+func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
+	err := cbds.cache.Clear(ctx, dgst)
+	if err != nil {
+		return err
+	}
+
+	err = cbds.backend.Clear(ctx, dgst)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+	if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
+		context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
+	}
+	return nil
+}
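`cachedBlobStatter` now requires full `BlobDescriptorService`s on both sides so that the new `Clear` can invalidate the cache and the backend together. A self-contained sketch of the same read-through pattern, detached from the distribution types:

```go
package cache

// store is a stand-in for distribution.BlobDescriptorService in this
// sketch: it can look up, record, and clear entries.
type store interface {
	Stat(key string) (string, error)
	SetDescriptor(key, val string) error
	Clear(key string) error
}

// cached prefers the cache and falls back to the backend — the same
// shape as cachedBlobStatter above.
type cached struct {
	cache, backend store
}

// Stat tries the cache first; on a miss it asks the backend and
// repopulates the cache (best effort), mirroring the fallback label in
// the real Stat.
func (c cached) Stat(key string) (string, error) {
	if v, err := c.cache.Stat(key); err == nil {
		return v, nil
	}
	v, err := c.backend.Stat(key)
	if err != nil {
		return "", err
	}
	_ = c.cache.SetDescriptor(key, v)
	return v, nil
}

// Clear mirrors the new cachedBlobStatter.Clear: invalidate the cache,
// then the backend, propagating the first error encountered.
func (c cached) Clear(key string) error {
	if err := c.cache.Clear(key); err != nil {
		return err
	}
	return c.backend.Clear(key)
}
```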
@@ -44,6 +44,10 @@ func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgs
 	return imbdcp.global.Stat(ctx, dgst)
 }

+func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error {
+	return imbdcp.global.Clear(ctx, dgst)
+}
+
 func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
 	_, err := imbdcp.Stat(ctx, dgst)
 	if err == distribution.ErrBlobUnknown {
@@ -80,6 +84,14 @@ func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Co
 	return rsimbdcp.repository.Stat(ctx, dgst)
 }

+func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
+	if rsimbdcp.repository == nil {
+		return distribution.ErrBlobUnknown
+	}
+
+	return rsimbdcp.repository.Clear(ctx, dgst)
+}
+
 func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
 	if rsimbdcp.repository == nil {
 		// allocate map since we are setting it now.
@@ -133,6 +145,14 @@ func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest
 	return desc, nil
 }

+func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
+	mbdc.mu.Lock()
+	defer mbdc.mu.Unlock()
+
+	delete(mbdc.descriptors, dgst)
+	return nil
+}
+
 func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
 	if err := dgst.Validate(); err != nil {
 		return err
@@ -139,3 +139,40 @@ func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provi
 		t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected)
 	}
 }
+
+func checkBlobDescriptorClear(t *testing.T, ctx context.Context, provider BlobDescriptorCacheProvider) {
+	localDigest := digest.Digest("sha384:abc")
+	expected := distribution.Descriptor{
+		Digest:    "sha256:abc",
+		Size:      10,
+		MediaType: "application/octet-stream"}
+
+	cache, err := provider.RepositoryScoped("foo/bar")
+	if err != nil {
+		t.Fatalf("unexpected error getting scoped cache: %v", err)
+	}
+
+	if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil {
+		t.Fatalf("error setting descriptor: %v", err)
+	}
+
+	desc, err := cache.Stat(ctx, localDigest)
+	if err != nil {
+		t.Fatalf("unexpected error statting fake2:abc: %v", err)
+	}
+
+	if expected != desc {
+		t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
+	}
+
+	err = cache.Clear(ctx, localDigest)
+	if err != nil {
+		t.Fatalf("unexpected error deleting descriptor")
+	}
+
+	nonExistantDigest := digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
+	err = cache.Clear(ctx, nonExistantDigest)
+	if err == nil {
+		t.Fatalf("expected error deleting unknown descriptor")
+	}
+}
@@ -8,7 +8,6 @@ import (
 	"crypto/rand"
 	"fmt"
 	"io"
-	"log"
 	"os"
 	"syscall"
 	"time"
@@ -30,7 +29,7 @@ var (

 	// Loggerf can be used to override the default logging destination. Such
 	// log messages in this library should be logged at warning or higher.
-	Loggerf = log.Printf
+	Loggerf = func(format string, args ...interface{}) {}
 )

 // UUID represents a UUID value. UUIDs can be compared and set to other values
@@ -49,6 +48,7 @@ func Generate() (u UUID) {

 	var (
 		totalBackoff time.Duration
+		count        int
 		retries      int
 	)

@@ -60,9 +60,10 @@ func Generate() (u UUID) {
 		time.Sleep(b)
 		totalBackoff += b

-		_, err := io.ReadFull(rand.Reader, u[:])
+		n, err := io.ReadFull(rand.Reader, u[count:])
 		if err != nil {
 			if retryOnError(err) && retries < maxretries {
+				count += n
 				retries++
 				Loggerf("error generating version 4 uuid, retrying: %v", err)
 				continue
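The interesting fix here: on a retriable error (e.g. EINTR) the old loop re-read into `u[:]` from scratch, discarding bytes already collected, while the new loop records the partial count and resumes at `u[count:]`. A runnable sketch of the accumulate-and-resume idea, with a deliberately short-reading source standing in for an interrupted `rand.Reader`:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// flaky returns at most 3 bytes per Read, standing in for a reader that
// gets interrupted mid-read (the EINTR case the uuid fix handles).
type flaky struct{ r io.Reader }

func (f flaky) Read(p []byte) (int, error) {
	if len(p) > 3 {
		p = p[:3]
	}
	return f.r.Read(p)
}

func main() {
	src := flaky{strings.NewReader("0123456789abcdef")}
	buf := make([]byte, 16)
	for count := 0; count < len(buf); {
		// Resume at buf[count:] rather than buf[:], so bytes already read
		// survive a retry — the essence of the Generate fix above.
		n, err := src.Read(buf[count:])
		count += n
		if err != nil {
			break
		}
	}
	fmt.Println(string(buf)) // 0123456789abcdef
}
```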
@@ -596,21 +596,18 @@ func (d *driver) CreateNetwork(id types.UUID, option map[string]interface{}) err
 	// networks. This step is needed now because driver might have now set the bridge
 	// name on this config struct. And because we need to check for possible address
 	// conflicts, so we need to check against operationa lnetworks.
-	if err := config.conflictsWithNetworks(id, networkList); err != nil {
+	if err = config.conflictsWithNetworks(id, networkList); err != nil {
 		return err
 	}

 	setupNetworkIsolationRules := func(config *networkConfiguration, i *bridgeInterface) error {
-		defer func() {
-			if err != nil {
-				if err := network.isolateNetwork(networkList, false); err != nil {
-					logrus.Warnf("Failed on removing the inter-network iptables rules on cleanup: %v", err)
-				}
+		if err := network.isolateNetwork(networkList, true); err != nil {
+			if err := network.isolateNetwork(networkList, false); err != nil {
+				logrus.Warnf("Failed on removing the inter-network iptables rules on cleanup: %v", err)
 			}
-		}()
-
-		err := network.isolateNetwork(networkList, true)
-		return err
+			return err
 		}
+		return nil
 	}

 	// Prepare the bridge setup configuration
@@ -766,17 +763,26 @@ func (d *driver) DeleteNetwork(nid types.UUID) error {
 }

 func addToBridge(ifaceName, bridgeName string) error {
-	iface, err := net.InterfaceByName(ifaceName)
+	link, err := netlink.LinkByName(ifaceName)
 	if err != nil {
 		return fmt.Errorf("could not find interface %s: %v", ifaceName, err)
 	}
+	if err = netlink.LinkSetMaster(link,
+		&netlink.Bridge{LinkAttrs: netlink.LinkAttrs{Name: bridgeName}}); err != nil {
+		logrus.Debugf("Failed to add %s to bridge via netlink.Trying ioctl: %v", ifaceName, err)
+		iface, err := net.InterfaceByName(ifaceName)
+		if err != nil {
+			return fmt.Errorf("could not find network interface %s: %v", ifaceName, err)
+		}

-	master, err := net.InterfaceByName(bridgeName)
-	if err != nil {
-		return fmt.Errorf("could not find bridge %s: %v", bridgeName, err)
+		master, err := net.InterfaceByName(bridgeName)
+		if err != nil {
+			return fmt.Errorf("could not find bridge %s: %v", bridgeName, err)
+		}
+
+		return ioctlAddToBridge(iface, master)
 	}

-	return ioctlAddToBridge(iface, master)
+	return nil
 }

 func setHairpinMode(link netlink.Link, enable bool) error {
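`addToBridge` now tries the netlink `LinkSetMaster` call first and only drops back to the legacy ioctl path when that fails, which keeps older kernels and restricted environments working while preferring the modern API. The shape of that fallback as a stand-alone sketch (both paths are stand-ins):

```go
package bridge

// tryThenFallback is a sketch of the shape addToBridge now follows:
// attempt the modern netlink path, and only if it fails, log and fall
// back to the legacy ioctl path.
func tryThenFallback(netlinkPath, ioctlPath func() error, logf func(string, ...interface{})) error {
	if err := netlinkPath(); err != nil {
		logf("netlink path failed, falling back to ioctl: %v", err)
		return ioctlPath()
	}
	return nil
}
```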
@@ -947,15 +953,14 @@ func (d *driver) CreateEndpoint(nid, eid types.UUID, epInfo driverapi.EndpointIn
 	}

 	// v4 address for the sandbox side pipe interface
-	sub := types.GetIPNetCanonical(n.bridge.bridgeIPv4)
-	ip4, err := ipAllocator.RequestIP(sub, nil)
+	ip4, err := ipAllocator.RequestIP(n.bridge.bridgeIPv4, nil)
 	if err != nil {
 		return err
 	}
 	ipv4Addr := &net.IPNet{IP: ip4, Mask: n.bridge.bridgeIPv4.Mask}

 	// Down the interface before configuring mac address.
-	if err := netlink.LinkSetDown(sbox); err != nil {
+	if err = netlink.LinkSetDown(sbox); err != nil {
 		return fmt.Errorf("could not set link down for container interface %s: %v", containerIfName, err)
 	}

@@ -968,7 +973,7 @@ func (d *driver) CreateEndpoint(nid, eid types.UUID, epInfo driverapi.EndpointIn
 	endpoint.macAddress = mac

 	// Up the host interface after finishing all netlink configuration
-	if err := netlink.LinkSetUp(host); err != nil {
+	if err = netlink.LinkSetUp(host); err != nil {
 		return fmt.Errorf("could not set link up for host interface %s: %v", hostIfName, err)
 	}

@@ -1074,8 +1079,7 @@ func (d *driver) DeleteEndpoint(nid, eid types.UUID) error {
 	n.releasePorts(ep)

 	// Release the v4 address allocated to this endpoint's sandbox interface
-	sub := types.GetIPNetCanonical(n.bridge.bridgeIPv4)
-	err = ipAllocator.ReleaseIP(sub, ep.addr.IP)
+	err = ipAllocator.ReleaseIP(n.bridge.bridgeIPv4, ep.addr.IP)
 	if err != nil {
 		return err
 	}
@@ -1,7 +1,11 @@
 package bridge

 import (
+	"fmt"
+
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/libnetwork/netutils"
+	"github.com/vishvananda/netlink"
 )

@@ -25,11 +29,25 @@ func setupDevice(config *networkConfiguration, i *bridgeInterface) error {
 	// Only set the bridge's MAC address if the kernel version is > 3.3, as it
 	// was not supported before that.
 	kv, err := kernel.GetKernelVersion()
-	if err == nil && (kv.Kernel >= 3 && kv.Major >= 3) {
-		setMac = true
+	if err != nil {
+		logrus.Errorf("Failed to check kernel versions: %v. Will not assign a MAC address to the bridge interface", err)
+	} else {
+		setMac = kv.Kernel > 3 || (kv.Kernel == 3 && kv.Major >= 3)
 	}

-	return ioctlCreateBridge(config.BridgeName, setMac)
+	if err = netlink.LinkAdd(i.Link); err != nil {
+		logrus.Debugf("Failed to create bridge %s via netlink. Trying ioctl", config.BridgeName)
+		return ioctlCreateBridge(config.BridgeName, setMac)
+	}
+
+	if setMac {
+		hwAddr := netutils.GenerateRandomMAC()
+		if err = netlink.LinkSetHardwareAddr(i.Link, hwAddr); err != nil {
+			return fmt.Errorf("failed to set bridge mac-address %s : %s", hwAddr, err.Error())
+		}
+		logrus.Debugf("Setting bridge mac address to %s", hwAddr)
+	}
+	return err
 }

 // SetupDeviceUp ups the given bridge interface.
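Worth spelling out: the old predicate `kv.Kernel >= 3 && kv.Major >= 3` wrongly rejects 4.x kernels (4.0 has Major 0), while the replacement is a correct ">= 3.3" comparison. A quick check:

```go
package main

import "fmt"

// atLeast33 is the fixed predicate from the hunk above.
func atLeast33(kernel, major int) bool {
	return kernel > 3 || (kernel == 3 && major >= 3)
}

func main() {
	for _, v := range [][2]int{{2, 6}, {3, 2}, {3, 3}, {3, 19}, {4, 0}} {
		old := v[0] >= 3 && v[1] >= 3 // old, buggy predicate
		fmt.Printf("%d.%d old=%v fixed=%v\n", v[0], v[1], old, atLeast33(v[0], v[1]))
	}
	// 4.0 prints old=false fixed=true — the case the fix addresses.
}
```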
@@ -8,7 +8,6 @@ import (

 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/libnetwork/netutils"
-	"github.com/docker/libnetwork/types"
 	"github.com/vishvananda/netlink"
 )

@@ -32,9 +31,9 @@ func init() {
 		bridgeNetworks = append(bridgeNetworks, &net.IPNet{IP: []byte{10, byte(i), 42, 1}, Mask: mask})
 	}
 	// 192.168.[42-44].1/24
-	mask[2] = 255
+	mask24 := []byte{255, 255, 255, 0}
 	for i := 42; i < 45; i++ {
-		bridgeNetworks = append(bridgeNetworks, &net.IPNet{IP: []byte{192, 168, byte(i), 1}, Mask: mask})
+		bridgeNetworks = append(bridgeNetworks, &net.IPNet{IP: []byte{192, 168, byte(i), 1}, Mask: mask24})
 	}
 }

@@ -76,8 +75,12 @@ func setupBridgeIPv4(config *networkConfiguration, i *bridgeInterface) error {
 }

 func allocateBridgeIP(config *networkConfiguration, i *bridgeInterface) error {
-	sub := types.GetIPNetCanonical(i.bridgeIPv4)
-	ipAllocator.RequestIP(sub, i.bridgeIPv4.IP)
+	// Because of the way ipallocator manages the container address space,
+	// reserve bridge address only if it belongs to the container network
+	// (if defined), no need otherwise
+	if config.FixedCIDR == nil || config.FixedCIDR.Contains(i.bridgeIPv4.IP) {
+		ipAllocator.RequestIP(i.bridgeIPv4, i.bridgeIPv4.IP)
+	}
 	return nil
 }

@@ -112,10 +115,13 @@ func setupGatewayIPv4(config *networkConfiguration, i *bridgeInterface) error {
 		return &ErrInvalidGateway{}
 	}

-	// Pass the real network subnet to ip allocator (no host bits set)
-	sub := types.GetIPNetCanonical(i.bridgeIPv4)
-	if _, err := ipAllocator.RequestIP(sub, config.DefaultGatewayIPv4); err != nil {
-		return err
+	// Because of the way ipallocator manages the container address space,
+	// reserve default gw address only if it belongs to the container network
+	// (if defined), no need otherwise
+	if config.FixedCIDR == nil || config.FixedCIDR.Contains(config.DefaultGatewayIPv4) {
+		if _, err := ipAllocator.RequestIP(i.bridgeIPv4, config.DefaultGatewayIPv4); err != nil {
+			return err
+		}
 	}

 	// Store requested default gateway
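Both hunks apply the same guard: reserve an address with the allocator only when it actually falls inside the configured container pool. A small demonstration of the `Contains` check with hypothetical addresses:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Hypothetical --fixed-cidr pool and candidate addresses.
	_, fixedCIDR, _ := net.ParseCIDR("172.17.0.0/24")
	bridgeIP := net.ParseIP("172.17.0.1")
	gatewayIP := net.ParseIP("172.17.42.1")

	// Mirrors the guards above: reserve only addresses inside the pool.
	fmt.Println(fixedCIDR.Contains(bridgeIP))  // true  -> reserve
	fmt.Println(fixedCIDR.Contains(gatewayIP)) // false -> skip
}
```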
@@ -66,7 +66,8 @@ func (a *IPAllocator) RegisterSubnet(network *net.IPNet, subnet *net.IPNet) erro
 	a.mutex.Lock()
 	defer a.mutex.Unlock()

-	key := network.String()
+	nw := &net.IPNet{IP: network.IP.Mask(network.Mask), Mask: network.Mask}
+	key := nw.String()
 	if _, ok := a.allocatedIPs[key]; ok {
 		return ErrNetworkAlreadyRegistered
 	}
@@ -90,10 +91,11 @@ func (a *IPAllocator) RequestIP(network *net.IPNet, ip net.IP) (net.IP, error) {
 	a.mutex.Lock()
 	defer a.mutex.Unlock()

-	key := network.String()
+	nw := &net.IPNet{IP: network.IP.Mask(network.Mask), Mask: network.Mask}
+	key := nw.String()
 	allocated, ok := a.allocatedIPs[key]
 	if !ok {
-		allocated = newAllocatedMap(network)
+		allocated = newAllocatedMap(nw)
 		a.allocatedIPs[key] = allocated
 	}

@@ -109,7 +111,8 @@ func (a *IPAllocator) ReleaseIP(network *net.IPNet, ip net.IP) error {
 	a.mutex.Lock()
 	defer a.mutex.Unlock()

-	if allocated, exists := a.allocatedIPs[network.String()]; exists {
+	nw := &net.IPNet{IP: network.IP.Mask(network.Mask), Mask: network.Mask}
+	if allocated, exists := a.allocatedIPs[nw.String()]; exists {
 		delete(allocated.p, ip.String())
 	}
 	return nil
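All three allocator methods now mask the network address before building the map key, so an `IPNet` carrying host bits (e.g. a bridge address like 172.17.42.1/16) and the canonical subnet resolve to the same allocation map. A runnable illustration:

```go
package main

import (
	"fmt"
	"net"
)

// canonicalKey is the normalization used in the hunks above.
func canonicalKey(network *net.IPNet) string {
	nw := &net.IPNet{IP: network.IP.Mask(network.Mask), Mask: network.Mask}
	return nw.String()
}

func main() {
	_, canonical, _ := net.ParseCIDR("172.17.42.1/16") // ParseCIDR masks for us
	// Build an IPNet that still carries host bits, as a bridge address does.
	withHostBits := &net.IPNet{IP: net.ParseIP("172.17.42.1"), Mask: net.CIDRMask(16, 32)}

	fmt.Println(withHostBits.String())      // 172.17.42.1/16
	fmt.Println(canonicalKey(withHostBits)) // 172.17.0.0/16
	fmt.Println(canonicalKey(canonical))    // 172.17.0.0/16 — same key either way
}
```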
@@ -74,20 +74,22 @@ func NetworkOverlaps(netX *net.IPNet, netY *net.IPNet) bool {

 // NetworkRange calculates the first and last IP addresses in an IPNet
 func NetworkRange(network *net.IPNet) (net.IP, net.IP) {
-	var netIP net.IP
-	if network.IP.To4() != nil {
-		netIP = network.IP.To4()
-	} else if network.IP.To16() != nil {
-		netIP = network.IP.To16()
-	} else {
+	if network == nil {
 		return nil, nil
 	}

-	lastIP := make([]byte, len(netIP), len(netIP))
-	for i := 0; i < len(netIP); i++ {
-		lastIP[i] = netIP[i] | ^network.Mask[i]
+	firstIP := network.IP.Mask(network.Mask)
+	lastIP := types.GetIPCopy(firstIP)
+	for i := 0; i < len(firstIP); i++ {
+		lastIP[i] = firstIP[i] | ^network.Mask[i]
 	}
-	return netIP.Mask(network.Mask), net.IP(lastIP)
+
+	if network.IP.To4() != nil {
+		firstIP = firstIP.To4()
+		lastIP = lastIP.To4()
+	}
+
+	return firstIP, lastIP
 }

 // GetIfaceAddr returns the first IPv4 address and slice of IPv6 addresses for the specified network interface
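The rewrite derives both ends of the range from the masked network address and copies the slice before mutating it (`types.GetIPCopy`), so the caller's IP is never modified and host bits no longer leak into the result. A sketch with a local copy helper standing in for `types.GetIPCopy`:

```go
package main

import (
	"fmt"
	"net"
)

// networkRange mirrors the rewritten netutils.NetworkRange, with a local
// copy helper standing in for types.GetIPCopy.
func networkRange(network *net.IPNet) (net.IP, net.IP) {
	if network == nil {
		return nil, nil
	}
	firstIP := network.IP.Mask(network.Mask)
	lastIP := append(net.IP(nil), firstIP...) // copy before mutating
	for i := 0; i < len(firstIP); i++ {
		lastIP[i] = firstIP[i] | ^network.Mask[i]
	}
	if network.IP.To4() != nil {
		firstIP = firstIP.To4()
		lastIP = lastIP.To4()
	}
	return firstIP, lastIP
}

func main() {
	_, n, _ := net.ParseCIDR("192.168.42.0/24")
	first, last := networkRange(n)
	fmt.Println(first, last) // 192.168.42.0 192.168.42.255
}
```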
@@ -139,10 +139,15 @@ func (s *sandboxData) rmEndpoint(ep *endpoint) {
 		}
 	}

-	// We don't check if s.endpoints is empty here because
-	// it should never be empty during a rmEndpoint call and
-	// if it is we will rightfully panic here
 	s.Lock()
+	if len(s.endpoints) == 0 {
+		// s.endpoints should never be empty and this is unexpected error condition
+		// We log an error message to note this down for debugging purposes.
+		logrus.Errorf("No endpoints in sandbox while trying to remove endpoint %s", ep.Name())
+		s.Unlock()
+		return
+	}
+
 	highEpBefore := s.endpoints[0]
 	var (
 		i int
@@ -245,7 +250,10 @@ func (c *controller) LeaveAll(id string) error {
 	}

 	sData.sandbox().Destroy()
+
+	c.Lock()
+	delete(c.sandboxes, sandbox.GenerateKey(id))
+	c.Unlock()

 	return nil
 }
201 vendor/src/github.com/docker/notary/LICENSE (vendored, new file)
@@ -0,0 +1,201 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2015 Docker, Inc.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@@ -1,9 +1,19 @@
 package changelist

+// Scopes for TufChanges are simply the TUF roles.
+// Unfortunately because of targets delegations, we can only
+// cover the base roles.
+const (
+	ScopeRoot      = "root"
+	ScopeTargets   = "targets"
+	ScopeSnapshot  = "snapshot"
+	ScopeTimestamp = "timestamp"
+)
+
 // TufChange represents a change to a TUF repo
 type TufChange struct {
 	// Abbreviated because Go doesn't permit a field and method of the same name
-	Actn       int    `json:"action"`
+	Actn       string `json:"action"`
 	Role       string `json:"role"`
 	ChangeType string `json:"type"`
 	ChangePath string `json:"path"`
@@ -11,7 +21,7 @@ type TufChange struct {
 }

 // NewTufChange initializes a tufChange object
-func NewTufChange(action int, role, changeType, changePath string, content []byte) *TufChange {
+func NewTufChange(action string, role, changeType, changePath string, content []byte) *TufChange {
 	return &TufChange{
 		Actn: action,
 		Role: role,
@@ -22,7 +32,7 @@ func NewTufChange(action int, role, changeType, changePath string, content []byt
 }

 // Action return c.Actn
-func (c TufChange) Action() int {
+func (c TufChange) Action() string {
 	return c.Actn
 }
@@ -5,6 +5,11 @@ type memChangelist struct {
 	changes []Change
 }

+// NewMemChangelist instantiates a new in-memory changelist
+func NewMemChangelist() Changelist {
+	return &memChangelist{}
+}
+
 // List returns a list of Changes
 func (cl memChangelist) List() []Change {
 	return cl.changes
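With `Actn` now a string and the scope constants exported, a change entry can be built directly from role names. A small usage sketch — the "create" action string and the target path are illustrative, not taken from this diff:

```go
// Illustrative values; only NewTufChange, Action, and the Scope*
// constants come from the hunks above.
c := changelist.NewTufChange("create", changelist.ScopeTargets, "target", "latest", nil)
fmt.Println(c.Action()) // prints: create
```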
Some files were not shown because too many files have changed in this diff.