mirror of
https://github.com/moby/moby.git
synced 2026-01-12 19:21:41 +00:00
Compare commits
262 Commits
master
...
v1.7.0-rc2
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7ddecf74be | ||
|
|
8e7ea1a8fd | ||
|
|
c11128520c | ||
|
|
25fc200dcf | ||
|
|
d17f27a13f | ||
|
|
f2f2c492e1 | ||
|
|
18af7fdbba | ||
|
|
339f0a128a | ||
|
|
5a0fa9545a | ||
|
|
930f691919 | ||
|
|
bf371ab2a5 | ||
|
|
d283999b1c | ||
|
|
2786c4d0e2 | ||
|
|
2939617c8b | ||
|
|
f5e3c68c93 | ||
|
|
c96b03797a | ||
|
|
0f06c54f40 | ||
|
|
32fcacdedb | ||
|
|
140e36a77e | ||
|
|
4945a51f73 | ||
|
|
c881349e5e | ||
|
|
ecdf1297a3 | ||
|
|
db3daa7bdd | ||
|
|
c0fc839e2b | ||
|
|
2e29eadb5c | ||
|
|
ecda5c0a6d | ||
|
|
4872f86d00 | ||
|
|
6ca78fff63 | ||
|
|
e9c51c3edc | ||
|
|
b38c720f19 | ||
|
|
3bed793ba1 | ||
|
|
ab7e7a7338 | ||
|
|
4fefcde5a6 | ||
|
|
63e3b7433f | ||
|
|
736c216d58 | ||
|
|
85f0e01833 | ||
|
|
eb3ed436a4 | ||
|
|
d77d7a0056 | ||
|
|
a0aad0d4de | ||
|
|
baf9ea5ce4 | ||
|
|
a6fe70c696 | ||
|
|
eee959a9b9 | ||
|
|
d000ba05fd | ||
|
|
d8eff999e0 | ||
|
|
fb124bcad0 | ||
|
|
9827107dcf | ||
|
|
e57d649057 | ||
|
|
d6ff6e2c6f | ||
|
|
cc2944c7af | ||
|
|
9cee8c4ed0 | ||
|
|
7c8fca2ddb | ||
|
|
376188dcd3 | ||
|
|
dc610864aa | ||
|
|
97cd073598 | ||
|
|
d5ebb60bdd | ||
|
|
83c5131acd | ||
|
|
b6a9dc399b | ||
|
|
614a9690e7 | ||
|
|
545b440a80 | ||
|
|
3162024e28 | ||
|
|
769acfec29 | ||
|
|
47496519da | ||
|
|
fdd21bf032 | ||
|
|
d928dad8c8 | ||
|
|
82366ce059 | ||
|
|
6410c3c066 | ||
|
|
9231dc9cc0 | ||
|
|
6a3f37386b | ||
|
|
d9a0c05208 | ||
|
|
24cb9df189 | ||
|
|
c51cd3298c | ||
|
|
10affa8018 | ||
|
|
ce27fa2716 | ||
|
|
8d83409e85 | ||
|
|
3a73b6a2bf | ||
|
|
f99269882f | ||
|
|
568a9703ac | ||
|
|
faaeb5162d | ||
|
|
b5613baac2 | ||
|
|
c956efcd52 | ||
|
|
5455864187 | ||
|
|
ceb72fab34 | ||
|
|
c6ea062a26 | ||
|
|
0e045ab50c | ||
|
|
eeb05fc081 | ||
|
|
e1381ae328 | ||
|
|
45ad064150 | ||
|
|
72e14a1566 | ||
|
|
7d7bec86c9 | ||
|
|
a39d49d676 | ||
|
|
5bf15a013b | ||
|
|
9461967eec | ||
|
|
3be7d11cee | ||
|
|
d9910b8fd8 | ||
|
|
f115c32f6b | ||
|
|
57939badc3 | ||
|
|
51ee02d478 | ||
|
|
c92860748c | ||
|
|
f582f9717f | ||
|
|
ebcb36a8d2 | ||
|
|
e6e8f2d717 | ||
|
|
317a510261 | ||
|
|
5d3a080178 | ||
|
|
542c84c2d2 | ||
|
|
f1df74d09d | ||
|
|
4ddbc7a62f | ||
|
|
f72b2c02b8 | ||
|
|
af9dab70f8 | ||
|
|
10425e83f2 | ||
|
|
9c528dca85 | ||
|
|
cb2c25ad2d | ||
|
|
962dec81ec | ||
|
|
1eae925a3d | ||
|
|
3ce2cc8ee7 | ||
|
|
054acc4bee | ||
|
|
63cb03a55b | ||
|
|
49b6f23696 | ||
|
|
299ae6a2e6 | ||
|
|
97b521bf10 | ||
|
|
7f5937d46c | ||
|
|
b6166b9496 | ||
|
|
b596d025f5 | ||
|
|
ca32446950 | ||
|
|
5328d6d620 | ||
|
|
d0023242ab | ||
|
|
3ff002aa1a | ||
|
|
ea9b357be2 | ||
|
|
bf1829459f | ||
|
|
4f744ca781 | ||
|
|
7dab04383b | ||
|
|
8a003c8134 | ||
|
|
208178c799 | ||
|
|
03b36f3451 | ||
|
|
7758553239 | ||
|
|
10fb5ce6d0 | ||
|
|
0959aec1a9 | ||
|
|
773f74eb71 | ||
|
|
7070d9255a | ||
|
|
2cb4b7f65c | ||
|
|
2d80652d8a | ||
|
|
81b4691406 | ||
|
|
4bae33ef9f | ||
|
|
a8a31eff10 | ||
|
|
68a8fd5c4e | ||
|
|
8387c5ab65 | ||
|
|
69498943c3 | ||
|
|
1aeb78c2ae | ||
|
|
331d37f35d | ||
|
|
edf3bf7f33 | ||
|
|
9ee8dca246 | ||
|
|
aa98bb6c13 | ||
|
|
2aba3c69f9 | ||
|
|
71a44c769e | ||
|
|
d8381fad2b | ||
|
|
be379580d0 | ||
|
|
7ea8513479 | ||
|
|
3b2fe01c78 | ||
|
|
e8afc22b1f | ||
|
|
4e407e6b77 | ||
|
|
23f1c2ea9e | ||
|
|
788047cafb | ||
|
|
0c0e7b1b60 | ||
|
|
09d41529a0 | ||
|
|
cb288fefee | ||
|
|
f7636796c5 | ||
|
|
cb5af83444 | ||
|
|
96feaf1920 | ||
|
|
1f03944950 | ||
|
|
6060eedf9c | ||
|
|
d217da854a | ||
|
|
d74d6d981b | ||
|
|
0205ac33d2 | ||
|
|
dbb9d47bdc | ||
|
|
ddd1d081d7 | ||
|
|
d6ac36d929 | ||
|
|
715b94f664 | ||
|
|
16baca9277 | ||
|
|
627f8a6cd5 | ||
|
|
a8a7df203a | ||
|
|
580cbcefd3 | ||
|
|
d9c5ce6e97 | ||
|
|
0fe9b95415 | ||
|
|
41d0e4293e | ||
|
|
26fe640da1 | ||
|
|
198ca26969 | ||
|
|
d5365f6fc4 | ||
|
|
5f7e814ee7 | ||
|
|
a84aca0985 | ||
|
|
68ec22876a | ||
|
|
0dcc3559e9 | ||
|
|
d4c731ecd6 | ||
|
|
2dba4e1386 | ||
|
|
06a7f471e0 | ||
|
|
4683d01691 | ||
|
|
6020a06399 | ||
|
|
cc0bfccdf4 | ||
|
|
0c18ec62f3 | ||
|
|
a9825c9bd8 | ||
|
|
908be50c44 | ||
|
|
2a82dba34d | ||
|
|
13fd2a908c | ||
|
|
464891aaf8 | ||
|
|
9974663ed7 | ||
|
|
76269e5c9d | ||
|
|
1121d7c4fd | ||
|
|
7e197575a2 | ||
|
|
3dc3059d94 | ||
|
|
7b6de74c9a | ||
|
|
cad8adacb8 | ||
|
|
6226deeaf4 | ||
|
|
3ec19f56cf | ||
|
|
48c71787ed | ||
|
|
604731a930 | ||
|
|
e8650e01f8 | ||
|
|
817d04d992 | ||
|
|
cdff91a01c | ||
|
|
6f26bd0e16 | ||
|
|
3c090db4e9 | ||
|
|
b7c3fdfd0d | ||
|
|
aa682a845b | ||
|
|
218d0dcc9d | ||
|
|
510d8f8634 | ||
|
|
b65600f6b6 | ||
|
|
79dcea718c | ||
|
|
072b09c45d | ||
|
|
c2d9837745 | ||
|
|
fa5dfbb18b | ||
|
|
6532a075f3 | ||
|
|
3b4a4bf809 | ||
|
|
4602909566 | ||
|
|
588f350b61 | ||
|
|
6e5ff509b2 | ||
|
|
61d341c2ca | ||
|
|
b996d379a1 | ||
|
|
b0935ea730 | ||
|
|
96fe13b49b | ||
|
|
12ccde442a | ||
|
|
4262cfe41f | ||
|
|
ddc2e25546 | ||
|
|
6646cff646 | ||
|
|
ac8fd856c0 | ||
|
|
48754d673c | ||
|
|
723684525a | ||
|
|
32aceadbe6 | ||
|
|
c67d3e159c | ||
|
|
a080e2add7 | ||
|
|
24d81b0ddb | ||
|
|
08f2fad40b | ||
|
|
f91fbe39ce | ||
|
|
018ab080bb | ||
|
|
fe94ecb2c1 | ||
|
|
7b2e67036f | ||
|
|
e130faea1b | ||
|
|
38f09de334 | ||
|
|
f9ba68ddfb | ||
|
|
16913455bd | ||
|
|
32f189cd08 | ||
|
|
526ca42282 | ||
|
|
b98b42d843 | ||
|
|
7bf03dd132 | ||
|
|
034aa3b2c4 | ||
|
|
6da1e01e6c |
34
CHANGELOG.md
34
CHANGELOG.md
@@ -1,5 +1,39 @@
|
||||
# Changelog
|
||||
|
||||
## 1.7.0 (2015-06-16)
|
||||
|
||||
#### Runtime
|
||||
+ Experimental feature: support for out-of-process volume plugins
|
||||
+ Experimental feature: support for out-of-process network plugins
|
||||
* Logging: syslog logging driver is available
|
||||
* The userland proxy can be disabled in favor of hairpin NAT using the daemon’s `--userland-proxy=false` flag
|
||||
* The `exec` command supports the `-u|--user` flag to specify the new process owner
|
||||
+ Default gateway for containers can be specified daemon-wide using the `--default-gateway` and `--default-gateway-v6` flags
|
||||
+ The CPU CFS (Completely Fair Scheduler) quota can be set in `docker run` using `--cpu-quota`
|
||||
+ Container block IO can be controlled in `docker run` using`--blkio-weight`
|
||||
+ ZFS support
|
||||
+ The `docker logs` command supports a `--since` argument
|
||||
+ UTS namespace can be shared with the host with `docker run --uts=host`
|
||||
|
||||
#### Quality
|
||||
* Networking stack was entirely rewritten as part of the libnetwork effort
|
||||
* Engine internals refactoring (shout out to the contributors?)
|
||||
* Volumes code was entirely rewritten to support the plugins effort
|
||||
+ Sending SIGUSR1 to a daemon will dump all goroutines stacks without exiting
|
||||
|
||||
#### Build
|
||||
+ Support ${variable:-value} and ${variable:+value} syntax for environment variables
|
||||
+ Support resource management flags `--cgroup-parent`, `--cpu-period`, `--cpu-quota`, `--cpuset-cpus`, `--cpuset-mems`
|
||||
+ git context changes with branches and directories
|
||||
* The .dockerignore file support exclusion rules
|
||||
|
||||
#### Distribution
|
||||
+ Client support for v2 mirroring support for the official registry
|
||||
|
||||
#### Bugfixes
|
||||
* Firewalld is now supported and will automatically be used when available
|
||||
* mounting --device recursively
|
||||
|
||||
## 1.6.2 (2015-05-13)
|
||||
|
||||
#### Runtime
|
||||
|
||||
@@ -180,7 +180,7 @@ Contributing to Docker
|
||||
======================
|
||||
|
||||
[](https://godoc.org/github.com/docker/docker)
|
||||
[](https://jenkins.dockerproject.com/job/Docker%20Master/)
|
||||
[](https://jenkins.dockerproject.org/job/Docker%20Master/)
|
||||
|
||||
Want to hack on Docker? Awesome! We have [instructions to help you get
|
||||
started contributing code or documentation.](https://docs.docker.com/project/who-written-for/).
|
||||
@@ -192,12 +192,12 @@ Getting the development builds
|
||||
==============================
|
||||
|
||||
Want to run Docker from a master build? You can download
|
||||
master builds at [master.dockerproject.com](https://master.dockerproject.com).
|
||||
master builds at [master.dockerproject.org](https://master.dockerproject.org).
|
||||
They are updated with each commit merged into the master branch.
|
||||
|
||||
Don't know how to use that super cool new feature in the master build? Check
|
||||
out the master docs at
|
||||
[docs.master.dockerproject.com](http://docs.master.dockerproject.com).
|
||||
[docs.master.dockerproject.org](http://docs.master.dockerproject.org).
|
||||
|
||||
How the project is run
|
||||
======================
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
@@ -97,7 +96,7 @@ func (cli *DockerCli) Cmd(args ...string) error {
|
||||
if len(args) > 0 {
|
||||
method, exists := cli.getMethod(args[0])
|
||||
if !exists {
|
||||
return fmt.Errorf("docker: '%s' is not a docker command. See 'docker --help'.", args[0])
|
||||
return fmt.Errorf("docker: '%s' is not a docker command.\nSee 'docker --help'.", args[0])
|
||||
}
|
||||
return method(args[1:]...)
|
||||
}
|
||||
@@ -117,18 +116,19 @@ func (cli *DockerCli) Subcmd(name, signature, description string, exitOnError bo
|
||||
errorHandling = flag.ContinueOnError
|
||||
}
|
||||
flags := flag.NewFlagSet(name, errorHandling)
|
||||
if signature != "" {
|
||||
signature = " " + signature
|
||||
}
|
||||
flags.Usage = func() {
|
||||
flags.ShortUsage()
|
||||
flags.PrintDefaults()
|
||||
}
|
||||
flags.ShortUsage = func() {
|
||||
options := ""
|
||||
if signature != "" {
|
||||
signature = " " + signature
|
||||
}
|
||||
if flags.FlagCountUndeprecated() > 0 {
|
||||
options = " [OPTIONS]"
|
||||
}
|
||||
fmt.Fprintf(cli.out, "\nUsage: docker %s%s%s\n\n%s\n\n", name, options, signature, description)
|
||||
flags.SetOutput(cli.out)
|
||||
flags.PrintDefaults()
|
||||
os.Exit(0)
|
||||
fmt.Fprintf(flags.Out(), "\nUsage: docker %s%s%s\n\n%s\n", name, options, signature, description)
|
||||
}
|
||||
return flags
|
||||
}
|
||||
|
||||
@@ -145,6 +145,7 @@ func (cli *DockerCli) CmdCreate(args ...string) error {
|
||||
config, hostConfig, cmd, err := runconfig.Parse(cmd, args)
|
||||
if err != nil {
|
||||
cmd.ReportError(err.Error(), true)
|
||||
os.Exit(1)
|
||||
}
|
||||
if config.Image == "" {
|
||||
cmd.Usage()
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/units"
|
||||
)
|
||||
@@ -15,7 +16,7 @@ import (
|
||||
func (cli *DockerCli) CmdInfo(args ...string) error {
|
||||
cmd := cli.Subcmd("info", "", "Display system-wide information", true)
|
||||
cmd.Require(flag.Exact, 0)
|
||||
cmd.ParseFlags(args, false)
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
rdr, _, err := cli.call("GET", "/info", nil, nil)
|
||||
if err != nil {
|
||||
@@ -29,20 +30,20 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
|
||||
|
||||
fmt.Fprintf(cli.out, "Containers: %d\n", info.Containers)
|
||||
fmt.Fprintf(cli.out, "Images: %d\n", info.Images)
|
||||
fmt.Fprintf(cli.out, "Storage Driver: %s\n", info.Driver)
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "Storage Driver: %s\n", info.Driver)
|
||||
if info.DriverStatus != nil {
|
||||
for _, pair := range info.DriverStatus {
|
||||
fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1])
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(cli.out, "Execution Driver: %s\n", info.ExecutionDriver)
|
||||
fmt.Fprintf(cli.out, "Logging Driver: %s\n", info.LoggingDriver)
|
||||
fmt.Fprintf(cli.out, "Kernel Version: %s\n", info.KernelVersion)
|
||||
fmt.Fprintf(cli.out, "Operating System: %s\n", info.OperatingSystem)
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "Execution Driver: %s\n", info.ExecutionDriver)
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "Logging Driver: %s\n", info.LoggingDriver)
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "Kernel Version: %s\n", info.KernelVersion)
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "Operating System: %s\n", info.OperatingSystem)
|
||||
fmt.Fprintf(cli.out, "CPUs: %d\n", info.NCPU)
|
||||
fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal)))
|
||||
fmt.Fprintf(cli.out, "Name: %s\n", info.Name)
|
||||
fmt.Fprintf(cli.out, "ID: %s\n", info.ID)
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "Name: %s\n", info.Name)
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "ID: %s\n", info.ID)
|
||||
|
||||
if info.Debug {
|
||||
fmt.Fprintf(cli.out, "Debug mode (server): %v\n", info.Debug)
|
||||
@@ -55,15 +56,9 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
|
||||
fmt.Fprintf(cli.out, "Docker Root Dir: %s\n", info.DockerRootDir)
|
||||
}
|
||||
|
||||
if info.HttpProxy != "" {
|
||||
fmt.Fprintf(cli.out, "Http Proxy: %s\n", info.HttpProxy)
|
||||
}
|
||||
if info.HttpsProxy != "" {
|
||||
fmt.Fprintf(cli.out, "Https Proxy: %s\n", info.HttpsProxy)
|
||||
}
|
||||
if info.NoProxy != "" {
|
||||
fmt.Fprintf(cli.out, "No Proxy: %s\n", info.NoProxy)
|
||||
}
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "Http Proxy: %s\n", info.HttpProxy)
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "Https Proxy: %s\n", info.HttpsProxy)
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "No Proxy: %s\n", info.NoProxy)
|
||||
|
||||
if info.IndexServerAddress != "" {
|
||||
u := cli.configFile.AuthConfigs[info.IndexServerAddress].Username
|
||||
|
||||
@@ -16,7 +16,7 @@ func (cli *DockerCli) CmdLogout(args ...string) error {
|
||||
cmd := cli.Subcmd("logout", "[SERVER]", "Log out from a Docker registry, if no server is\nspecified \""+registry.IndexServerAddress()+"\" is the default.", true)
|
||||
cmd.Require(flag.Max, 1)
|
||||
|
||||
cmd.ParseFlags(args, false)
|
||||
cmd.ParseFlags(args, true)
|
||||
serverAddress := registry.IndexServerAddress()
|
||||
if len(cmd.Args()) > 0 {
|
||||
serverAddress = cmd.Arg(0)
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
func (cli *DockerCli) CmdPause(args ...string) error {
|
||||
cmd := cli.Subcmd("pause", "CONTAINER [CONTAINER...]", "Pause all processes within a container", true)
|
||||
cmd.Require(flag.Min, 1)
|
||||
cmd.ParseFlags(args, false)
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
var errNames []string
|
||||
for _, name := range cmd.Args() {
|
||||
|
||||
@@ -57,6 +57,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
|
||||
// just in case the Parse does not exit
|
||||
if err != nil {
|
||||
cmd.ReportError(err.Error(), true)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if len(hostConfig.Dns) > 0 {
|
||||
|
||||
@@ -46,7 +46,6 @@ func (s *containerStats) Collect(cli *DockerCli, streamStats bool) {
|
||||
var (
|
||||
previousCPU uint64
|
||||
previousSystem uint64
|
||||
start = true
|
||||
dec = json.NewDecoder(stream)
|
||||
u = make(chan error, 1)
|
||||
)
|
||||
@@ -61,10 +60,9 @@ func (s *containerStats) Collect(cli *DockerCli, streamStats bool) {
|
||||
memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0
|
||||
cpuPercent = 0.0
|
||||
)
|
||||
if !start {
|
||||
cpuPercent = calculateCPUPercent(previousCPU, previousSystem, v)
|
||||
}
|
||||
start = false
|
||||
previousCPU = v.PreCpuStats.CpuUsage.TotalUsage
|
||||
previousSystem = v.PreCpuStats.SystemUsage
|
||||
cpuPercent = calculateCPUPercent(previousCPU, previousSystem, v)
|
||||
s.mu.Lock()
|
||||
s.CPUPercentage = cpuPercent
|
||||
s.Memory = float64(v.MemoryStats.Usage)
|
||||
@@ -73,8 +71,6 @@ func (s *containerStats) Collect(cli *DockerCli, streamStats bool) {
|
||||
s.NetworkRx = float64(v.Network.RxBytes)
|
||||
s.NetworkTx = float64(v.Network.TxBytes)
|
||||
s.mu.Unlock()
|
||||
previousCPU = v.CpuStats.CpuUsage.TotalUsage
|
||||
previousSystem = v.CpuStats.SystemUsage
|
||||
u <- nil
|
||||
if !streamStats {
|
||||
return
|
||||
@@ -151,7 +147,7 @@ func (cli *DockerCli) CmdStats(args ...string) error {
|
||||
}
|
||||
// do a quick pause so that any failed connections for containers that do not exist are able to be
|
||||
// evicted before we display the initial or default values.
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
time.Sleep(1500 * time.Millisecond)
|
||||
var errs []string
|
||||
for _, c := range cStats {
|
||||
c.mu.Lock()
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
func (cli *DockerCli) CmdUnpause(args ...string) error {
|
||||
cmd := cli.Subcmd("unpause", "CONTAINER [CONTAINER...]", "Unpause all processes within a container", true)
|
||||
cmd.Require(flag.Min, 1)
|
||||
cmd.ParseFlags(args, false)
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
var errNames []string
|
||||
for _, name := range cmd.Args() {
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/autogen/dockerversion"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/utils"
|
||||
)
|
||||
|
||||
// CmdVersion shows Docker version information.
|
||||
@@ -20,7 +21,7 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
|
||||
cmd := cli.Subcmd("version", "", "Show the Docker version information.", true)
|
||||
cmd.Require(flag.Exact, 0)
|
||||
|
||||
cmd.ParseFlags(args, false)
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
if dockerversion.VERSION != "" {
|
||||
fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION)
|
||||
@@ -31,6 +32,9 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
|
||||
fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT)
|
||||
}
|
||||
fmt.Fprintf(cli.out, "OS/Arch (client): %s/%s\n", runtime.GOOS, runtime.GOARCH)
|
||||
if utils.ExperimentalBuild() {
|
||||
fmt.Fprintf(cli.out, "Experimental (client): true\n")
|
||||
}
|
||||
|
||||
stream, _, err := cli.call("GET", "/version", nil, nil)
|
||||
if err != nil {
|
||||
@@ -50,6 +54,8 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
|
||||
fmt.Fprintf(cli.out, "Go version (server): %s\n", v.GoVersion)
|
||||
fmt.Fprintf(cli.out, "Git commit (server): %s\n", v.GitCommit)
|
||||
fmt.Fprintf(cli.out, "OS/Arch (server): %s/%s\n", v.Os, v.Arch)
|
||||
|
||||
if v.Experimental {
|
||||
fmt.Fprintf(cli.out, "Experimental (server): true\n")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -11,6 +11,15 @@ func boolValue(r *http.Request, k string) bool {
|
||||
return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none")
|
||||
}
|
||||
|
||||
// boolValueOrDefault returns the default bool passed if the query param is
|
||||
// missing, otherwise it's just a proxy to boolValue above
|
||||
func boolValueOrDefault(r *http.Request, k string, d bool) bool {
|
||||
if _, ok := r.Form[k]; !ok {
|
||||
return d
|
||||
}
|
||||
return boolValue(r, k)
|
||||
}
|
||||
|
||||
func int64ValueOrZero(r *http.Request, k string) int64 {
|
||||
val, err := strconv.ParseInt(r.FormValue(k), 10, 64)
|
||||
if err != nil {
|
||||
|
||||
@@ -33,6 +33,21 @@ func TestBoolValue(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBoolValueOrDefault(t *testing.T) {
|
||||
r, _ := http.NewRequest("GET", "", nil)
|
||||
if !boolValueOrDefault(r, "queryparam", true) {
|
||||
t.Fatal("Expected to get true default value, got false")
|
||||
}
|
||||
|
||||
v := url.Values{}
|
||||
v.Set("param", "")
|
||||
r, _ = http.NewRequest("GET", "", nil)
|
||||
r.Form = v
|
||||
if boolValueOrDefault(r, "param", true) {
|
||||
t.Fatal("Expected not to get true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInt64ValueOrZero(t *testing.T) {
|
||||
cases := map[string]int64{
|
||||
"": 0,
|
||||
|
||||
@@ -97,15 +97,17 @@ func (s *Server) ServeApi(protoAddrs []string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.servers = append(s.servers, srv)
|
||||
s.servers = append(s.servers, srv...)
|
||||
|
||||
go func(proto, addr string) {
|
||||
logrus.Infof("Listening for HTTP on %s (%s)", proto, addr)
|
||||
if err := srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") {
|
||||
err = nil
|
||||
}
|
||||
chErrors <- err
|
||||
}(protoAddrParts[0], protoAddrParts[1])
|
||||
for _, s := range srv {
|
||||
logrus.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1])
|
||||
go func(s serverCloser) {
|
||||
if err := s.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") {
|
||||
err = nil
|
||||
}
|
||||
chErrors <- err
|
||||
}(s)
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < len(protoAddrs); i++ {
|
||||
@@ -252,6 +254,11 @@ func (s *Server) getVersion(version version.Version, w http.ResponseWriter, r *h
|
||||
Os: runtime.GOOS,
|
||||
Arch: runtime.GOARCH,
|
||||
}
|
||||
|
||||
if version.GreaterThanOrEqualTo("1.19") {
|
||||
v.Experimental = utils.ExperimentalBuild()
|
||||
}
|
||||
|
||||
if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
|
||||
v.KernelVersion = kernelVersion.String()
|
||||
}
|
||||
@@ -271,14 +278,20 @@ func (s *Server) postContainersKill(version version.Version, w http.ResponseWrit
|
||||
name := vars["name"]
|
||||
|
||||
// If we have a signal, look at it. Otherwise, do nothing
|
||||
if sigStr := vars["signal"]; sigStr != "" {
|
||||
if sigStr := r.Form.Get("signal"); sigStr != "" {
|
||||
// Check if we passed the signal as a number:
|
||||
// The largest legal signal is 31, so let's parse on 5 bits
|
||||
sig, err := strconv.ParseUint(sigStr, 10, 5)
|
||||
sigN, err := strconv.ParseUint(sigStr, 10, 5)
|
||||
if err != nil {
|
||||
// The signal is not a number, treat it as a string (either like
|
||||
// "KILL" or like "SIGKILL")
|
||||
sig = uint64(signal.SignalMap[strings.TrimPrefix(sigStr, "SIG")])
|
||||
syscallSig, ok := signal.SignalMap[strings.TrimPrefix(sigStr, "SIG")]
|
||||
if !ok {
|
||||
return fmt.Errorf("Invalid signal: %s", sigStr)
|
||||
}
|
||||
sig = uint64(syscallSig)
|
||||
} else {
|
||||
sig = sigN
|
||||
}
|
||||
|
||||
if sig == 0 {
|
||||
@@ -453,6 +466,12 @@ func (s *Server) getEvents(version version.Version, w http.ResponseWriter, r *ht
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var closeNotify <-chan bool
|
||||
if closeNotifier, ok := w.(http.CloseNotifier); ok {
|
||||
closeNotify = closeNotifier.CloseNotify()
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case ev := <-l:
|
||||
@@ -465,6 +484,9 @@ func (s *Server) getEvents(version version.Version, w http.ResponseWriter, r *ht
|
||||
}
|
||||
case <-timer.C:
|
||||
return nil
|
||||
case <-closeNotify:
|
||||
logrus.Debug("Client disconnected, stop sending events")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -550,7 +572,7 @@ func (s *Server) getContainersStats(version version.Version, w http.ResponseWrit
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
|
||||
return s.daemon.ContainerStats(vars["name"], boolValue(r, "stream"), ioutils.NewWriteFlusher(w))
|
||||
return s.daemon.ContainerStats(vars["name"], boolValueOrDefault(r, "stream", true), ioutils.NewWriteFlusher(w))
|
||||
}
|
||||
|
||||
func (s *Server) getContainersLogs(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
@@ -1133,6 +1155,14 @@ func (s *Server) getContainersByName(version version.Version, w http.ResponseWri
|
||||
return fmt.Errorf("Missing parameter")
|
||||
}
|
||||
|
||||
if version.LessThan("1.19") {
|
||||
containerJSONRaw, err := s.daemon.ContainerInspectRaw(vars["name"])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return writeJSON(w, http.StatusOK, containerJSONRaw)
|
||||
}
|
||||
|
||||
containerJSON, err := s.daemon.ContainerInspect(vars["name"])
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -12,57 +12,48 @@ import (
|
||||
"github.com/docker/docker/pkg/systemd"
|
||||
)
|
||||
|
||||
// newServer sets up the required serverCloser and does protocol specific checking.
|
||||
func (s *Server) newServer(proto, addr string) (serverCloser, error) {
|
||||
// newServer sets up the required serverClosers and does protocol specific checking.
|
||||
func (s *Server) newServer(proto, addr string) ([]serverCloser, error) {
|
||||
var (
|
||||
err error
|
||||
l net.Listener
|
||||
ls []net.Listener
|
||||
)
|
||||
switch proto {
|
||||
case "fd":
|
||||
ls, err := systemd.ListenFD(addr)
|
||||
ls, err = systemd.ListenFD(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chErrors := make(chan error, len(ls))
|
||||
// We don't want to start serving on these sockets until the
|
||||
// daemon is initialized and installed. Otherwise required handlers
|
||||
// won't be ready.
|
||||
<-s.start
|
||||
// Since ListenFD will return one or more sockets we have
|
||||
// to create a go func to spawn off multiple serves
|
||||
for i := range ls {
|
||||
listener := ls[i]
|
||||
go func() {
|
||||
httpSrv := http.Server{Handler: s.router}
|
||||
chErrors <- httpSrv.Serve(listener)
|
||||
}()
|
||||
}
|
||||
for i := 0; i < len(ls); i++ {
|
||||
if err := <-chErrors; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
case "tcp":
|
||||
l, err = s.initTcpSocket(addr)
|
||||
l, err := s.initTcpSocket(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ls = append(ls, l)
|
||||
case "unix":
|
||||
if l, err = sockets.NewUnixSocket(addr, s.cfg.SocketGroup, s.start); err != nil {
|
||||
l, err := sockets.NewUnixSocket(addr, s.cfg.SocketGroup, s.start)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ls = append(ls, l)
|
||||
default:
|
||||
return nil, fmt.Errorf("Invalid protocol format: %q", proto)
|
||||
}
|
||||
return &HttpServer{
|
||||
&http.Server{
|
||||
Addr: addr,
|
||||
Handler: s.router,
|
||||
},
|
||||
l,
|
||||
}, nil
|
||||
var res []serverCloser
|
||||
for _, l := range ls {
|
||||
res = append(res, &HttpServer{
|
||||
&http.Server{
|
||||
Addr: addr,
|
||||
Handler: s.router,
|
||||
},
|
||||
l,
|
||||
})
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (s *Server) AcceptConnections(d *daemon.Daemon) {
|
||||
|
||||
@@ -81,6 +81,7 @@ type Network struct {
|
||||
type Stats struct {
|
||||
Read time.Time `json:"read"`
|
||||
Network Network `json:"network,omitempty"`
|
||||
PreCpuStats CpuStats `json:"precpu_stats,omitempty"`
|
||||
CpuStats CpuStats `json:"cpu_stats,omitempty"`
|
||||
MemoryStats MemoryStats `json:"memory_stats,omitempty"`
|
||||
BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
|
||||
|
||||
@@ -101,16 +101,16 @@ type Port struct {
|
||||
}
|
||||
|
||||
type Container struct {
|
||||
ID string `json:"Id"`
|
||||
Names []string `json:",omitempty"`
|
||||
Image string `json:",omitempty"`
|
||||
Command string `json:",omitempty"`
|
||||
Created int `json:",omitempty"`
|
||||
Ports []Port `json:",omitempty"`
|
||||
SizeRw int `json:",omitempty"`
|
||||
SizeRootFs int `json:",omitempty"`
|
||||
Labels map[string]string `json:",omitempty"`
|
||||
Status string `json:",omitempty"`
|
||||
ID string `json:"Id"`
|
||||
Names []string
|
||||
Image string
|
||||
Command string
|
||||
Created int
|
||||
Ports []Port
|
||||
SizeRw int `json:",omitempty"`
|
||||
SizeRootFs int `json:",omitempty"`
|
||||
Labels map[string]string
|
||||
Status string
|
||||
}
|
||||
|
||||
// POST "/containers/"+containerID+"/copy"
|
||||
@@ -132,6 +132,7 @@ type Version struct {
|
||||
Os string
|
||||
Arch string
|
||||
KernelVersion string `json:",omitempty"`
|
||||
Experimental bool `json:",omitempty"`
|
||||
}
|
||||
|
||||
// GET "/info"
|
||||
@@ -194,12 +195,11 @@ type ContainerState struct {
|
||||
}
|
||||
|
||||
// GET "/containers/{name:.*}/json"
|
||||
type ContainerJSON struct {
|
||||
type ContainerJSONBase struct {
|
||||
Id string
|
||||
Created time.Time
|
||||
Path string
|
||||
Args []string
|
||||
Config *runconfig.Config
|
||||
State *ContainerState
|
||||
Image string
|
||||
NetworkSettings *network.Settings
|
||||
@@ -219,3 +219,24 @@ type ContainerJSON struct {
|
||||
ExecIDs []string
|
||||
HostConfig *runconfig.HostConfig
|
||||
}
|
||||
|
||||
type ContainerJSON struct {
|
||||
*ContainerJSONBase
|
||||
Config *runconfig.Config
|
||||
}
|
||||
|
||||
// backcompatibility struct along with ContainerConfig
|
||||
type ContainerJSONRaw struct {
|
||||
*ContainerJSONBase
|
||||
Config *ContainerConfig
|
||||
}
|
||||
|
||||
type ContainerConfig struct {
|
||||
*runconfig.Config
|
||||
|
||||
// backward compatibility, they now live in HostConfig
|
||||
Memory int64
|
||||
MemorySwap int64
|
||||
CpuShares int64
|
||||
Cpuset string
|
||||
}
|
||||
|
||||
15
contrib/builder/rpm/fedora-22/Dockerfile
Normal file
15
contrib/builder/rpm/fedora-22/Dockerfile
Normal file
@@ -0,0 +1,15 @@
|
||||
#
|
||||
# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/generate.sh"!
|
||||
#
|
||||
|
||||
FROM fedora:22
|
||||
|
||||
RUN yum install -y @development-tools fedora-packager
|
||||
RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel sqlite-devel tar
|
||||
|
||||
ENV GO_VERSION 1.4.2
|
||||
RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
|
||||
ENV PATH $PATH:/usr/local/go/bin
|
||||
|
||||
ENV AUTO_GOPATH 1
|
||||
ENV DOCKER_BUILDTAGS selinux
|
||||
@@ -212,11 +212,12 @@ _docker_docker() {
|
||||
--selinux-enabled
|
||||
--tls
|
||||
--tlsverify
|
||||
--userland-proxy=false
|
||||
--version -v
|
||||
"
|
||||
|
||||
case "$prev" in
|
||||
--graph|-g)
|
||||
--exec-root|--graph|-g)
|
||||
_filedir -d
|
||||
return
|
||||
;;
|
||||
@@ -267,22 +268,25 @@ _docker_attach() {
|
||||
|
||||
_docker_build() {
|
||||
case "$prev" in
|
||||
--tag|-t)
|
||||
__docker_image_repos_and_tags
|
||||
--cgroup-parent|--cpuset-cpus|--cpuset-mems|--cpu-shares|-c|--cpu-period|--cpu-quota|--memory|-m|--memory-swap)
|
||||
return
|
||||
;;
|
||||
--file|-f)
|
||||
_filedir
|
||||
return
|
||||
;;
|
||||
--tag|-t)
|
||||
__docker_image_repos_and_tags
|
||||
return
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--cpu-shares -c --cpuset-cpus --cpu-quota --file -f --force-rm --help --memory -m --memory-swap --no-cache --pull --quiet -q --rm --tag -t" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "--cgroup-parent --cpuset-cpus --cpuset-mems --cpu-shares -c --cpu-period --cpu-quota --file -f --force-rm --help --memory -m --memory-swap --no-cache --pull --quiet -q --rm --tag -t" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
local counter="$(__docker_pos_first_nonflag '--tag|-t')"
|
||||
local counter="$(__docker_pos_first_nonflag '--cgroup-parent|--cpuset-cpus|--cpuset-mems|--cpu-shares|-c|--cpu-period|--cpu-quota|--file|-f|--memory|-m|--memory-swap|--tag|-t')"
|
||||
if [ $cword -eq $counter ]; then
|
||||
_filedir -d
|
||||
fi
|
||||
@@ -405,6 +409,12 @@ _docker_events() {
|
||||
}
|
||||
|
||||
_docker_exec() {
|
||||
case "$prev" in
|
||||
--user|-u)
|
||||
return
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--detach -d --help --interactive -i -t --tty -u --user" -- "$cur" ) )
|
||||
@@ -586,7 +596,7 @@ _docker_logout() {
|
||||
|
||||
_docker_logs() {
|
||||
case "$prev" in
|
||||
--tail)
|
||||
--since|--tail)
|
||||
return
|
||||
;;
|
||||
esac
|
||||
@@ -771,15 +781,16 @@ _docker_rmi() {
|
||||
_docker_run() {
|
||||
local options_with_args="
|
||||
--add-host
|
||||
--blkio-weight
|
||||
--attach -a
|
||||
--cap-add
|
||||
--cap-drop
|
||||
--cgroup-parent
|
||||
--cidfile
|
||||
--cpuset
|
||||
--cpu-shares -c
|
||||
--cpu-period
|
||||
--cpu-quota
|
||||
--cpu-shares -c
|
||||
--device
|
||||
--dns
|
||||
--dns-search
|
||||
@@ -805,6 +816,7 @@ _docker_run() {
|
||||
--security-opt
|
||||
--user -u
|
||||
--ulimit
|
||||
--uts
|
||||
--volumes-from
|
||||
--volume -v
|
||||
--workdir -w
|
||||
@@ -1156,6 +1168,8 @@ _docker() {
|
||||
--api-cors-header
|
||||
--bip
|
||||
--bridge -b
|
||||
--default-gateway
|
||||
--default-gateway-v6
|
||||
--default-ulimit
|
||||
--dns
|
||||
--dns-search
|
||||
@@ -1203,6 +1217,9 @@ _docker() {
|
||||
;;
|
||||
-*)
|
||||
;;
|
||||
=)
|
||||
(( counter++ ))
|
||||
;;
|
||||
*)
|
||||
command="${words[$counter]}"
|
||||
cpos=$counter
|
||||
|
||||
@@ -1054,6 +1054,15 @@ func (container *Container) networkMounts() []execdriver.Mount {
|
||||
return mounts
|
||||
}
|
||||
|
||||
func (container *Container) addBindMountPoint(name, source, destination string, rw bool) {
|
||||
container.MountPoints[destination] = &mountPoint{
|
||||
Name: name,
|
||||
Source: source,
|
||||
Destination: destination,
|
||||
RW: rw,
|
||||
}
|
||||
}
|
||||
|
||||
func (container *Container) addLocalMountPoint(name, destination string, rw bool) {
|
||||
container.MountPoints[destination] = &mountPoint{
|
||||
Name: name,
|
||||
|
||||
@@ -183,7 +183,7 @@ func getDevicesFromPath(deviceMapping runconfig.DeviceMapping) (devs []*configs.
|
||||
|
||||
func populateCommand(c *Container, env []string) error {
|
||||
var en *execdriver.Network
|
||||
if !c.daemon.config.DisableNetwork {
|
||||
if !c.Config.NetworkDisabled {
|
||||
en = &execdriver.Network{
|
||||
NamespacePath: c.NetworkSettings.SandboxKey,
|
||||
}
|
||||
@@ -227,9 +227,10 @@ func populateCommand(c *Container, env []string) error {
|
||||
|
||||
userSpecifiedDevices = append(userSpecifiedDevices, devs...)
|
||||
}
|
||||
allowedDevices := append(configs.DefaultAllowedDevices, userSpecifiedDevices...)
|
||||
|
||||
autoCreatedDevices := append(configs.DefaultAutoCreatedDevices, userSpecifiedDevices...)
|
||||
allowedDevices := mergeDevices(configs.DefaultAllowedDevices, userSpecifiedDevices)
|
||||
|
||||
autoCreatedDevices := mergeDevices(configs.DefaultAutoCreatedDevices, userSpecifiedDevices)
|
||||
|
||||
// TODO: this can be removed after lxc-conf is fully deprecated
|
||||
lxcConfig, err := mergeLxcConfIntoOptions(c.hostConfig)
|
||||
@@ -309,6 +310,25 @@ func populateCommand(c *Container, env []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Device {
|
||||
if len(userDevices) == 0 {
|
||||
return defaultDevices
|
||||
}
|
||||
|
||||
paths := map[string]*configs.Device{}
|
||||
for _, d := range userDevices {
|
||||
paths[d.Path] = d
|
||||
}
|
||||
|
||||
var devs []*configs.Device
|
||||
for _, d := range defaultDevices {
|
||||
if _, defined := paths[d.Path]; !defined {
|
||||
devs = append(devs, d)
|
||||
}
|
||||
}
|
||||
return append(devs, userDevices...)
|
||||
}
|
||||
|
||||
// GetSize, return real size, virtual size
|
||||
func (container *Container) GetSize() (int64, int64) {
|
||||
var (
|
||||
@@ -493,13 +513,23 @@ func (container *Container) buildPortMapInfo(n libnetwork.Network, ep libnetwork
|
||||
networkSettings.MacAddress = mac.(net.HardwareAddr).String()
|
||||
}
|
||||
|
||||
networkSettings.Ports = nat.PortMap{}
|
||||
|
||||
if expData, ok := driverInfo[netlabel.ExposedPorts]; ok {
|
||||
if exposedPorts, ok := expData.([]types.TransportPort); ok {
|
||||
for _, tp := range exposedPorts {
|
||||
natPort := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port)))
|
||||
networkSettings.Ports[natPort] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mapData, ok := driverInfo[netlabel.PortMap]
|
||||
if !ok {
|
||||
return networkSettings, nil
|
||||
}
|
||||
|
||||
if portMapping, ok := mapData.([]types.PortBinding); ok {
|
||||
networkSettings.Ports = nat.PortMap{}
|
||||
for _, pp := range portMapping {
|
||||
natPort := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port)))
|
||||
natBndg := nat.PortBinding{HostIp: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))}
|
||||
@@ -913,6 +943,12 @@ func (container *Container) ReleaseNetwork() {
|
||||
return
|
||||
}
|
||||
|
||||
// If the container is not attached to any network do not try
|
||||
// to release network and generate spurious error messages.
|
||||
if container.NetworkSettings.NetworkID == "" {
|
||||
return
|
||||
}
|
||||
|
||||
n, err := container.daemon.netController.NetworkByID(container.NetworkSettings.NetworkID)
|
||||
if err != nil {
|
||||
logrus.Errorf("error locating network id %s: %v", container.NetworkSettings.NetworkID, err)
|
||||
|
||||
@@ -213,7 +213,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
|
||||
// we'll waste time if we update it for every container
|
||||
daemon.idIndex.Add(container.ID)
|
||||
|
||||
if err := daemon.verifyOldVolumesInfo(container); err != nil {
|
||||
if err := daemon.verifyVolumesInfo(container); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1003,8 +1003,9 @@ func (daemon *Daemon) Shutdown() error {
|
||||
|
||||
go func() {
|
||||
defer group.Done()
|
||||
if err := c.KillSig(15); err != nil {
|
||||
logrus.Debugf("kill 15 error for %s - %s", c.ID, err)
|
||||
// If container failed to exit in 10 seconds of SIGTERM, then using the force
|
||||
if err := c.Stop(10); err != nil {
|
||||
logrus.Errorf("Stop container %s with error: %v", c.ID, err)
|
||||
}
|
||||
c.WaitStop(-1 * time.Second)
|
||||
logrus.Debugf("container stopped %s", c.ID)
|
||||
|
||||
@@ -129,12 +129,25 @@ func TestLoadWithVolume(t *testing.T) {
|
||||
|
||||
containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
|
||||
containerPath := filepath.Join(tmp, containerId)
|
||||
if err = os.MkdirAll(containerPath, 0755); err != nil {
|
||||
if err := os.MkdirAll(containerPath, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
hostVolumeId := stringid.GenerateRandomID()
|
||||
volumePath := filepath.Join(tmp, "vfs", "dir", hostVolumeId)
|
||||
vfsPath := filepath.Join(tmp, "vfs", "dir", hostVolumeId)
|
||||
volumePath := filepath.Join(tmp, "volumes", hostVolumeId)
|
||||
|
||||
if err := os.MkdirAll(vfsPath, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.MkdirAll(volumePath, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
content := filepath.Join(vfsPath, "helo")
|
||||
if err := ioutil.WriteFile(content, []byte("HELO"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0,
|
||||
"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"},
|
||||
@@ -152,7 +165,7 @@ func TestLoadWithVolume(t *testing.T) {
|
||||
"Name":"/ubuntu","Driver":"aufs","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0,
|
||||
"UpdateDns":false,"Volumes":{"/vol1":"%s"},"VolumesRW":{"/vol1":true},"AppliedVolumesFrom":null}`
|
||||
|
||||
cfg := fmt.Sprintf(config, volumePath)
|
||||
cfg := fmt.Sprintf(config, vfsPath)
|
||||
if err = ioutil.WriteFile(filepath.Join(containerPath, "config.json"), []byte(cfg), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -165,10 +178,6 @@ func TestLoadWithVolume(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err = os.MkdirAll(volumePath, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
daemon := &Daemon{
|
||||
repository: tmp,
|
||||
root: tmp,
|
||||
@@ -179,7 +188,7 @@ func TestLoadWithVolume(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = daemon.verifyOldVolumesInfo(c)
|
||||
err = daemon.verifyVolumesInfo(c)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -204,4 +213,91 @@ func TestLoadWithVolume(t *testing.T) {
|
||||
if m.Driver != volume.DefaultDriverName {
|
||||
t.Fatalf("Expected mount driver local, was %s\n", m.Driver)
|
||||
}
|
||||
|
||||
newVolumeContent := filepath.Join(volumePath, "helo")
|
||||
b, err := ioutil.ReadFile(newVolumeContent)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(b) != "HELO" {
|
||||
t.Fatalf("Expected HELO, was %s\n", string(b))
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadWithBindMount(t *testing.T) {
|
||||
tmp, err := ioutil.TempDir("", "docker-daemon-test-")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
|
||||
containerPath := filepath.Join(tmp, containerId)
|
||||
if err = os.MkdirAll(containerPath, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0,
|
||||
"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"},
|
||||
"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top",
|
||||
"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"",
|
||||
"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true,
|
||||
"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null,
|
||||
"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95",
|
||||
"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1",
|
||||
"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","PortMapping":null,"Ports":{}},
|
||||
"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf",
|
||||
"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname",
|
||||
"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts",
|
||||
"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log",
|
||||
"Name":"/ubuntu","Driver":"aufs","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0,
|
||||
"UpdateDns":false,"Volumes":{"/vol1": "/vol1"},"VolumesRW":{"/vol1":true},"AppliedVolumesFrom":null}`
|
||||
|
||||
if err = ioutil.WriteFile(filepath.Join(containerPath, "config.json"), []byte(config), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
hostConfig := `{"Binds":["/vol1:/vol1"],"ContainerIDFile":"","LxcConf":[],"Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"",
|
||||
"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null,
|
||||
"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0},
|
||||
"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}`
|
||||
if err = ioutil.WriteFile(filepath.Join(containerPath, "hostconfig.json"), []byte(hostConfig), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
daemon := &Daemon{
|
||||
repository: tmp,
|
||||
root: tmp,
|
||||
}
|
||||
|
||||
c, err := daemon.load(containerId)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = daemon.verifyVolumesInfo(c)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(c.MountPoints) != 1 {
|
||||
t.Fatalf("Expected 1 volume mounted, was 0\n")
|
||||
}
|
||||
|
||||
m := c.MountPoints["/vol1"]
|
||||
if m.Name != "" {
|
||||
t.Fatalf("Expected empty mount name, was %s\n", m.Name)
|
||||
}
|
||||
|
||||
if m.Source != "/vol1" {
|
||||
t.Fatalf("Expected mount source /vol1, was %s\n", m.Source)
|
||||
}
|
||||
|
||||
if m.Destination != "/vol1" {
|
||||
t.Fatalf("Expected mount destination /vol1, was %s\n", m.Destination)
|
||||
}
|
||||
|
||||
if !m.RW {
|
||||
t.Fatalf("Expected mount point to be RW but it was not\n")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -63,7 +63,8 @@ func (daemon *Daemon) imgDeleteHelper(name string, list *[]types.ImageDelete, fi
|
||||
repos := daemon.Repositories().ByID()[img.ID]
|
||||
|
||||
//If delete by id, see if the id belong only to one repository
|
||||
if repoName == "" {
|
||||
deleteByID := repoName == ""
|
||||
if deleteByID {
|
||||
for _, repoAndTag := range repos {
|
||||
parsedRepo, parsedTag := parsers.ParseRepositoryTag(repoAndTag)
|
||||
if repoName == "" || repoName == parsedRepo {
|
||||
@@ -91,7 +92,7 @@ func (daemon *Daemon) imgDeleteHelper(name string, list *[]types.ImageDelete, fi
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(repos) <= 1 {
|
||||
if len(repos) <= 1 || (len(repoAndTags) <= 1 && deleteByID) {
|
||||
if err := daemon.canDeleteImage(img.ID, force); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -25,6 +25,40 @@ func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error
|
||||
container.Lock()
|
||||
defer container.Unlock()
|
||||
|
||||
base, err := daemon.getInspectData(container)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &types.ContainerJSON{base, container.Config}, nil
|
||||
}
|
||||
|
||||
func (daemon *Daemon) ContainerInspectRaw(name string) (*types.ContainerJSONRaw, error) {
|
||||
container, err := daemon.Get(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
container.Lock()
|
||||
defer container.Unlock()
|
||||
|
||||
base, err := daemon.getInspectData(container)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config := &types.ContainerConfig{
|
||||
container.Config,
|
||||
container.hostConfig.Memory,
|
||||
container.hostConfig.MemorySwap,
|
||||
container.hostConfig.CpuShares,
|
||||
container.hostConfig.CpusetCpus,
|
||||
}
|
||||
|
||||
return &types.ContainerJSONRaw{base, config}, nil
|
||||
}
|
||||
|
||||
func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSONBase, error) {
|
||||
// make a copy to play with
|
||||
hostConfig := *container.hostConfig
|
||||
|
||||
@@ -60,12 +94,11 @@ func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error
|
||||
volumesRW[m.Destination] = m.RW
|
||||
}
|
||||
|
||||
contJSON := &types.ContainerJSON{
|
||||
contJSONBase := &types.ContainerJSONBase{
|
||||
Id: container.ID,
|
||||
Created: container.Created,
|
||||
Path: container.Path,
|
||||
Args: container.Args,
|
||||
Config: container.Config,
|
||||
State: containerState,
|
||||
Image: container.ImageID,
|
||||
NetworkSettings: container.NetworkSettings,
|
||||
@@ -86,7 +119,7 @@ func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error
|
||||
HostConfig: &hostConfig,
|
||||
}
|
||||
|
||||
return contJSON, nil
|
||||
return contJSONBase, nil
|
||||
}
|
||||
|
||||
func (daemon *Daemon) ContainerExecInspect(id string) (*execConfig, error) {
|
||||
|
||||
@@ -2,6 +2,7 @@ package logger
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -40,14 +41,27 @@ func (c *Copier) Run() {
|
||||
|
||||
func (c *Copier) copySrc(name string, src io.Reader) {
|
||||
defer c.copyJobs.Done()
|
||||
scanner := bufio.NewScanner(src)
|
||||
for scanner.Scan() {
|
||||
if err := c.dst.Log(&Message{ContainerID: c.cid, Line: scanner.Bytes(), Source: name, Timestamp: time.Now().UTC()}); err != nil {
|
||||
logrus.Errorf("Failed to log msg %q for logger %s: %s", scanner.Bytes(), c.dst.Name(), err)
|
||||
reader := bufio.NewReader(src)
|
||||
|
||||
for {
|
||||
line, err := reader.ReadBytes('\n')
|
||||
line = bytes.TrimSuffix(line, []byte{'\n'})
|
||||
|
||||
// ReadBytes can return full or partial output even when it failed.
|
||||
// e.g. it can return a full entry and EOF.
|
||||
if err == nil || len(line) > 0 {
|
||||
if logErr := c.dst.Log(&Message{ContainerID: c.cid, Line: line, Source: name, Timestamp: time.Now().UTC()}); logErr != nil {
|
||||
logrus.Errorf("Failed to log msg %q for logger %s: %s", line, c.dst.Name(), logErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
logrus.Errorf("Error scanning log stream: %s", err)
|
||||
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
logrus.Errorf("Error scanning log stream: %s", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -3,14 +3,17 @@
|
||||
package syslog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log/syslog"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/daemon/logger"
|
||||
"github.com/docker/docker/pkg/urlutil"
|
||||
)
|
||||
|
||||
const name = "syslog"
|
||||
@@ -27,7 +30,18 @@ func init() {
|
||||
|
||||
func New(ctx logger.Context) (logger.Logger, error) {
|
||||
tag := ctx.ContainerID[:12]
|
||||
log, err := syslog.New(syslog.LOG_DAEMON, fmt.Sprintf("%s/%s", path.Base(os.Args[0]), tag))
|
||||
|
||||
proto, address, err := parseAddress(ctx.Config["syslog-address"])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log, err := syslog.Dial(
|
||||
proto,
|
||||
address,
|
||||
syslog.LOG_DAEMON,
|
||||
path.Base(os.Args[0])+"/"+tag,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -55,3 +69,33 @@ func (s *Syslog) Name() string {
|
||||
func (s *Syslog) GetReader() (io.Reader, error) {
|
||||
return nil, logger.ReadLogsNotSupported
|
||||
}
|
||||
|
||||
func parseAddress(address string) (string, string, error) {
|
||||
if urlutil.IsTransportURL(address) {
|
||||
url, err := url.Parse(address)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
// unix socket validation
|
||||
if url.Scheme == "unix" {
|
||||
if _, err := os.Stat(url.Path); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
return url.Scheme, url.Path, nil
|
||||
}
|
||||
|
||||
// here we process tcp|udp
|
||||
host := url.Host
|
||||
if _, _, err := net.SplitHostPort(host); err != nil {
|
||||
if !strings.Contains(err.Error(), "missing port in address") {
|
||||
return "", "", err
|
||||
}
|
||||
host = host + ":514"
|
||||
}
|
||||
|
||||
return url.Scheme, host, nil
|
||||
}
|
||||
|
||||
return "", "", nil
|
||||
}
|
||||
|
||||
@@ -15,13 +15,23 @@ func (daemon *Daemon) ContainerStats(name string, stream bool, out io.Writer) er
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var pre_cpu_stats types.CpuStats
|
||||
for first_v := range updates {
|
||||
first_update := first_v.(*execdriver.ResourceStats)
|
||||
first_stats := convertToAPITypes(first_update.Stats)
|
||||
pre_cpu_stats = first_stats.CpuStats
|
||||
pre_cpu_stats.SystemUsage = first_update.SystemUsage
|
||||
break
|
||||
}
|
||||
enc := json.NewEncoder(out)
|
||||
for v := range updates {
|
||||
update := v.(*execdriver.ResourceStats)
|
||||
ss := convertToAPITypes(update.Stats)
|
||||
ss.PreCpuStats = pre_cpu_stats
|
||||
ss.MemoryStats.Limit = uint64(update.MemoryLimit)
|
||||
ss.Read = update.Read
|
||||
ss.CpuStats.SystemUsage = update.SystemUsage
|
||||
pre_cpu_stats = ss.CpuStats
|
||||
if err := enc.Encode(ss); err != nil {
|
||||
// TODO: handle the specific broken pipe
|
||||
daemon.UnsubscribeToContainerStats(name, updates)
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/pkg/chrootarchive"
|
||||
"github.com/docker/docker/runconfig"
|
||||
"github.com/docker/docker/volume"
|
||||
@@ -74,7 +75,7 @@ func parseBindMount(spec string, mountLabel string, config *runconfig.Config) (*
|
||||
return nil, fmt.Errorf("Invalid volume specification: %s", spec)
|
||||
}
|
||||
|
||||
name, source, err := parseVolumeSource(arr[0], config)
|
||||
name, source, err := parseVolumeSource(arr[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -238,9 +239,9 @@ func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runc
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyOldVolumesInfo ports volumes configured for the containers pre docker 1.7.
|
||||
// verifyVolumesInfo ports volumes configured for the containers pre docker 1.7.
|
||||
// It reads the container configuration and creates valid mount points for the old volumes.
|
||||
func (daemon *Daemon) verifyOldVolumesInfo(container *Container) error {
|
||||
func (daemon *Daemon) verifyVolumesInfo(container *Container) error {
|
||||
jsonPath, err := container.jsonPath()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -268,18 +269,59 @@ func (daemon *Daemon) verifyOldVolumesInfo(container *Container) error {
|
||||
|
||||
for destination, hostPath := range vols.Volumes {
|
||||
vfsPath := filepath.Join(daemon.root, "vfs", "dir")
|
||||
rw := vols.VolumesRW != nil && vols.VolumesRW[destination]
|
||||
|
||||
if strings.HasPrefix(hostPath, vfsPath) {
|
||||
id := filepath.Base(hostPath)
|
||||
|
||||
rw := vols.VolumesRW != nil && vols.VolumesRW[destination]
|
||||
if err := daemon.migrateVolume(id, hostPath); err != nil {
|
||||
return err
|
||||
}
|
||||
container.addLocalMountPoint(id, destination, rw)
|
||||
} else { // Bind mount
|
||||
id, source, err := parseVolumeSource(hostPath)
|
||||
// We should not find an error here coming
|
||||
// from the old configuration, but who knows.
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
container.addBindMountPoint(id, source, destination, rw)
|
||||
}
|
||||
}
|
||||
|
||||
return container.ToDisk()
|
||||
}
|
||||
|
||||
// migrateVolume moves the contents of a volume created pre Docker 1.7
|
||||
// to the location expected by the local driver. Steps:
|
||||
// 1. Save old directory that includes old volume's config json file.
|
||||
// 2. Move virtual directory with content to where the local driver expects it to be.
|
||||
// 3. Remove the backup of the old volume config.
|
||||
func (daemon *Daemon) migrateVolume(id, vfs string) error {
|
||||
volumeInfo := filepath.Join(daemon.root, defaultVolumesPathName, id)
|
||||
backup := filepath.Join(daemon.root, defaultVolumesPathName, id+".back")
|
||||
|
||||
var err error
|
||||
if err = os.Rename(volumeInfo, backup); err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
// Put old configuration back in place in case one of the next steps fails.
|
||||
if err != nil {
|
||||
os.Rename(backup, volumeInfo)
|
||||
}
|
||||
}()
|
||||
|
||||
if err = os.Rename(vfs, volumeInfo); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = os.RemoveAll(backup); err != nil {
|
||||
logrus.Errorf("Unable to remove volume info backup directory %s: %v", backup, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createVolume(name, driverName string) (volume.Volume, error) {
|
||||
vd, err := getVolumeDriver(driverName)
|
||||
if err != nil {
|
||||
|
||||
@@ -5,7 +5,6 @@ package daemon
|
||||
import (
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/runconfig"
|
||||
"github.com/docker/docker/volume"
|
||||
"github.com/docker/docker/volume/drivers"
|
||||
)
|
||||
@@ -17,7 +16,7 @@ func getVolumeDriver(name string) (volume.Driver, error) {
|
||||
return volumedrivers.Lookup(name)
|
||||
}
|
||||
|
||||
func parseVolumeSource(spec string, config *runconfig.Config) (string, string, error) {
|
||||
func parseVolumeSource(spec string) (string, string, error) {
|
||||
if !filepath.IsAbs(spec) {
|
||||
return spec, "", nil
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/runconfig"
|
||||
"github.com/docker/docker/volume"
|
||||
"github.com/docker/docker/volume/drivers"
|
||||
)
|
||||
@@ -15,7 +14,7 @@ func getVolumeDriver(_ string) (volume.Driver, error) {
|
||||
return volumedrivers.Lookup(volume.DefaultDriverName)
|
||||
}
|
||||
|
||||
func parseVolumeSource(spec string, _ *runconfig.Config) (string, string, error) {
|
||||
func parseVolumeSource(spec string) (string, string, error) {
|
||||
if !filepath.IsAbs(spec) {
|
||||
return "", "", fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", spec)
|
||||
}
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/docker/docker/pkg/term"
|
||||
"github.com/docker/docker/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -161,5 +162,9 @@ func main() {
|
||||
}
|
||||
|
||||
func showVersion() {
|
||||
fmt.Printf("Docker version %s, build %s\n", dockerversion.VERSION, dockerversion.GITCOMMIT)
|
||||
if utils.ExperimentalBuild() {
|
||||
fmt.Printf("Docker version %s, build %s, experimental\n", dockerversion.VERSION, dockerversion.GITCOMMIT)
|
||||
} else {
|
||||
fmt.Printf("Docker version %s, build %s\n", dockerversion.VERSION, dockerversion.GITCOMMIT)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ Docker has two primary branches for documentation:
|
||||
| Branch | Description | URL (published via commit-hook) |
|
||||
|----------|--------------------------------|------------------------------------------------------------------------------|
|
||||
| `docs` | Official release documentation | [https://docs.docker.com](https://docs.docker.com) |
|
||||
| `master` | Merged but unreleased development work | [http://docs.master.dockerproject.com](http://docs.master.dockerproject.com) |
|
||||
| `master` | Merged but unreleased development work | [http://docs.master.dockerproject.org](http://docs.master.dockerproject.org) |
|
||||
|
||||
Additions and updates to upcoming releases are made in a feature branch off of
|
||||
the `master` branch. The Docker maintainers also support a `docs` branch that
|
||||
@@ -22,7 +22,7 @@ After a release, documentation updates are continually merged into `master` as
|
||||
they occur. This work includes new documentation for forthcoming features, bug
|
||||
fixes, and other updates. Docker's CI system automatically builds and updates
|
||||
the `master` documentation after each merge and posts it to
|
||||
[http://docs.master.dockerproject.com](http://docs.master.dockerproject.com).
|
||||
[http://docs.master.dockerproject.org](http://docs.master.dockerproject.org).
|
||||
|
||||
Periodically, the Docker maintainers update `docs.docker.com` between official
|
||||
releases of Docker. They do this by cherry-picking commits from `master`,
|
||||
|
||||
@@ -33,6 +33,7 @@ docker-create - Create a new container
|
||||
[**--link**[=*[]*]]
|
||||
[**--lxc-conf**[=*[]*]]
|
||||
[**--log-driver**[=*[]*]]
|
||||
[**--log-opt**[=*[]*]]
|
||||
[**-m**|**--memory**[=*MEMORY*]]
|
||||
[**--memory-swap**[=*MEMORY-SWAP*]]
|
||||
[**--mac-address**[=*MAC-ADDRESS*]]
|
||||
@@ -148,6 +149,9 @@ two memory nodes.
|
||||
Logging driver for container. Default is defined by daemon `--log-driver` flag.
|
||||
**Warning**: `docker logs` command works only for `json-file` logging driver.
|
||||
|
||||
**--log-opt**=[]
|
||||
Logging driver specific options.
|
||||
|
||||
**-m**, **--memory**=""
|
||||
Memory limit (format: <number><optional unit>, where unit = b, k, m or g)
|
||||
|
||||
|
||||
@@ -34,6 +34,7 @@ docker-run - Run a command in a new container
|
||||
[**--link**[=*[]*]]
|
||||
[**--lxc-conf**[=*[]*]]
|
||||
[**--log-driver**[=*[]*]]
|
||||
[**--log-opt**[=*[]*]]
|
||||
[**-m**|**--memory**[=*MEMORY*]]
|
||||
[**--memory-swap**[=*MEMORY-SWAP*]]
|
||||
[**--mac-address**[=*MAC-ADDRESS*]]
|
||||
@@ -255,6 +256,9 @@ which interface and port to use.
|
||||
Logging driver for container. Default is defined by daemon `--log-driver` flag.
|
||||
**Warning**: `docker logs` command works only for `json-file` logging driver.
|
||||
|
||||
**--log-opt**=[]
|
||||
Logging driver specific options.
|
||||
|
||||
**-m**, **--memory**=""
|
||||
Memory limit (format: <number><optional unit>, where unit = b, k, m or g)
|
||||
|
||||
|
||||
@@ -105,6 +105,9 @@ unix://[/path/to/socket] to use.
|
||||
Default driver for container logs. Default is `json-file`.
|
||||
**Warning**: `docker logs` command works only for `json-file` logging driver.
|
||||
|
||||
**--log-opt**=[]
|
||||
Logging driver specific options.
|
||||
|
||||
**--mtu**=VALUE
|
||||
Set the containers network mtu. Default is `0`.
|
||||
|
||||
|
||||
@@ -86,8 +86,8 @@ program code and documentation code.
|
||||
|
||||
## Merges after pull requests
|
||||
|
||||
* After a merge, [a master build](https://master.dockerproject.com/) is
|
||||
* After a merge, [a master build](https://master.dockerproject.org/) is
|
||||
available almost immediately.
|
||||
|
||||
* If you made a documentation change, you can see it at
|
||||
[docs.master.dockerproject.com](http://docs.master.dockerproject.com/).
|
||||
[docs.master.dockerproject.org](http://docs.master.dockerproject.org/).
|
||||
|
||||
@@ -108,7 +108,7 @@ It can take time to see a merged pull request in Docker's official release.
|
||||
A master build is available almost immediately though. Docker builds and
|
||||
updates its development binaries after each merge to `master`.
|
||||
|
||||
1. Browse to <a href="https://master.dockerproject.com/" target="_blank">https://master.dockerproject.com/</a>.
|
||||
1. Browse to <a href="https://master.dockerproject.org/" target="_blank">https://master.dockerproject.org/</a>.
|
||||
|
||||
2. Look for the binary appropriate to your system.
|
||||
|
||||
@@ -117,7 +117,7 @@ updates its development binaries after each merge to `master`.
|
||||
You might want to run the binary in a container though. This
|
||||
will keep your local host environment clean.
|
||||
|
||||
4. View any documentation changes at <a href="http://docs.master.dockerproject.com/" target="_blank">docs.master.dockerproject.com</a>.
|
||||
4. View any documentation changes at <a href="http://docs.master.dockerproject.org/" target="_blank">docs.master.dockerproject.org</a>.
|
||||
|
||||
Once you've verified everything merged, feel free to delete your feature branch
|
||||
from your fork. For information on how to do this,
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -169,6 +169,7 @@ expect an integer, and they can only be specified once.
|
||||
-l, --log-level="info" Set the logging level
|
||||
--label=[] Set key=value labels to the daemon
|
||||
--log-driver="json-file" Default driver for container logs
|
||||
--log-opt=[] Log driver specific options
|
||||
--mtu=0 Set the containers network MTU
|
||||
-p, --pidfile="/var/run/docker.pid" Path to use for daemon PID file
|
||||
--registry-mirror=[] Preferred Docker registry mirror
|
||||
@@ -282,6 +283,18 @@ set `zfs.fsname` option as described in [Storage driver options](#storage-driver
|
||||
The `overlay` is a very fast union filesystem. It is now merged in the main
|
||||
Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137).
|
||||
Call `docker -d -s overlay` to use it.
|
||||
> **Note:**
|
||||
> As promising as `overlay` is, the feature is still quite young and should not
|
||||
> be used in production. Most notably, using `overlay` can cause excessive
|
||||
> inode consumption (especially as the number of images grows), as well as
|
||||
> being incompatible with the use of RPMs.
|
||||
|
||||
> **Note:**
|
||||
> As promising as `overlay` is, the feature is still quite young and should not
|
||||
> be used in production. Most notably, using `overlay` can cause excessive
|
||||
> inode consumption (especially as the number of images grows), as well as
|
||||
> being incompatible with the use of RPMs.
|
||||
|
||||
> **Note:**
|
||||
> It is currently unsupported on `btrfs` or any Copy on Write filesystem
|
||||
> and should only be used over `ext4` partitions.
|
||||
@@ -984,6 +997,7 @@ Creates a new container.
|
||||
--label-file=[] Read in a line delimited file of labels
|
||||
--link=[] Add link to another container
|
||||
--log-driver="" Logging driver for container
|
||||
--log-opt=[] Log driver specific options
|
||||
--lxc-conf=[] Add custom lxc options
|
||||
-m, --memory="" Memory limit
|
||||
--mac-address="" Container MAC address (e.g. 92:d0:c6:0a:29:33)
|
||||
@@ -1948,6 +1962,7 @@ To remove an image using its digest:
|
||||
--ipc="" IPC namespace to use
|
||||
--link=[] Add link to another container
|
||||
--log-driver="" Logging driver for container
|
||||
--log-opt=[] Log driver specific options
|
||||
--lxc-conf=[] Add custom lxc options
|
||||
-m, --memory="" Memory limit
|
||||
-l, --label=[] Set metadata on the container (e.g., --label=com.example.key=value)
|
||||
|
||||
@@ -873,18 +873,31 @@ this driver.
|
||||
Default logging driver for Docker. Writes JSON messages to file. `docker logs`
|
||||
command is available only for this logging driver
|
||||
|
||||
The following logging options are supported for this logging driver: [none]
|
||||
|
||||
#### Logging driver: syslog
|
||||
|
||||
Syslog logging driver for Docker. Writes log messages to syslog. `docker logs`
|
||||
command is not available for this logging driver
|
||||
|
||||
The following logging options are supported for this logging driver:
|
||||
|
||||
--log-opt address=[tcp|udp]://host:port
|
||||
--log-opt address=unix://path
|
||||
|
||||
`address` specifies the remote syslog server address where the driver connects to.
|
||||
If not specified it defaults to the local unix socket of the running system.
|
||||
If transport is either `tcp` or `udp` and `port` is not specified it defaults to `514`
|
||||
The following example shows how to have the `syslog` driver connect to a `syslog`
|
||||
remote server at `192.168.0.42` on port `123`
|
||||
|
||||
$ docker run --log-driver=syslog --log-opt address=tcp://192.168.0.42:123
|
||||
|
||||
#### Logging driver: journald
|
||||
|
||||
Journald logging driver for Docker. Writes log messages to journald; the container id will be stored in the journal's `CONTAINER_ID` field. `docker logs` command is not available for this logging driver. For detailed information on working with this logging driver, see [the journald logging driver](reference/logging/journald) reference documentation.
|
||||
|
||||
#### Log Opts :
|
||||
|
||||
Logging options for configuring a log driver. The following log options are supported: [none]
|
||||
The following logging options are supported for this logging driver: [none]
|
||||
|
||||
## Overriding Dockerfile image defaults
|
||||
|
||||
|
||||
@@ -38,7 +38,7 @@ notation. Use the following guidelines to name your keys:
|
||||
reverse DNS notation of a domain controlled by the author. For
|
||||
example, `com.example.some-label`.
|
||||
|
||||
- The `com.docker.*`, `io.docker.*` and `com.dockerproject.*` namespaces are
|
||||
- The `com.docker.*`, `io.docker.*` and `org.dockerproject.*` namespaces are
|
||||
reserved for Docker's internal use.
|
||||
|
||||
- Keys should only consist of lower-cased alphanumeric characters,
|
||||
|
||||
@@ -8,92 +8,164 @@ import (
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/docker/docker/trust"
|
||||
"github.com/docker/docker/utils"
|
||||
"github.com/docker/libtrust"
|
||||
)
|
||||
|
||||
// loadManifest loads a manifest from a byte array and verifies its content.
|
||||
// The signature must be verified or an error is returned. If the manifest
|
||||
// contains no signatures by a trusted key for the name in the manifest, the
|
||||
// image is not considered verified. The parsed manifest object and a boolean
|
||||
// for whether the manifest is verified is returned.
|
||||
func (s *TagStore) loadManifest(manifestBytes []byte, dgst, ref string) (*registry.ManifestData, bool, error) {
|
||||
sig, err := libtrust.ParsePrettySignature(manifestBytes, "signatures")
|
||||
// loadManifest loads a manifest from a byte array and verifies its content,
|
||||
// returning the local digest, the manifest itself, whether or not it was
|
||||
// verified. If ref is a digest, rather than a tag, this will be treated as
|
||||
// the local digest. An error will be returned if the signature verification
|
||||
// fails, local digest verification fails and, if provided, the remote digest
|
||||
// verification fails. The boolean return will only be false without error on
|
||||
// the failure of signatures trust check.
|
||||
func (s *TagStore) loadManifest(manifestBytes []byte, ref string, remoteDigest digest.Digest) (digest.Digest, *registry.ManifestData, bool, error) {
|
||||
payload, keys, err := unpackSignedManifest(manifestBytes)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("error parsing payload: %s", err)
|
||||
return "", nil, false, fmt.Errorf("error unpacking manifest: %v", err)
|
||||
}
|
||||
|
||||
keys, err := sig.Verify()
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("error verifying payload: %s", err)
|
||||
}
|
||||
// TODO(stevvooe): It would be a lot better here to build up a stack of
|
||||
// verifiers, then push the bytes one time for signatures and digests, but
|
||||
// the manifests are typically small, so this optimization is not worth
|
||||
// hacking this code without further refactoring.
|
||||
|
||||
payload, err := sig.Payload()
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("error retrieving payload: %s", err)
|
||||
}
|
||||
var localDigest digest.Digest
|
||||
|
||||
var manifestDigest digest.Digest
|
||||
// Verify the local digest, if present in ref. ParseDigest will validate
|
||||
// that the ref is a digest and verify against that if present. Otherwize
|
||||
// (on error), we simply compute the localDigest and proceed.
|
||||
if dgst, err := digest.ParseDigest(ref); err == nil {
|
||||
// verify the manifest against local ref
|
||||
if err := verifyDigest(dgst, payload); err != nil {
|
||||
return "", nil, false, fmt.Errorf("verifying local digest: %v", err)
|
||||
}
|
||||
|
||||
if dgst != "" {
|
||||
manifestDigest, err = digest.ParseDigest(dgst)
|
||||
localDigest = dgst
|
||||
} else {
|
||||
// We don't have a local digest, since we are working from a tag.
|
||||
// Compute the digest of the payload and return that.
|
||||
logrus.Debugf("provided manifest reference %q is not a digest: %v", ref, err)
|
||||
localDigest, err = digest.FromBytes(payload)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("invalid manifest digest from registry: %s", err)
|
||||
}
|
||||
|
||||
dgstVerifier, err := digest.NewDigestVerifier(manifestDigest)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("unable to verify manifest digest from registry: %s", err)
|
||||
}
|
||||
|
||||
dgstVerifier.Write(payload)
|
||||
|
||||
if !dgstVerifier.Verified() {
|
||||
computedDigest, _ := digest.FromBytes(payload)
|
||||
return nil, false, fmt.Errorf("unable to verify manifest digest: registry has %q, computed %q", manifestDigest, computedDigest)
|
||||
// near impossible
|
||||
logrus.Errorf("error calculating local digest during tag pull: %v", err)
|
||||
return "", nil, false, err
|
||||
}
|
||||
}
|
||||
|
||||
if utils.DigestReference(ref) && ref != manifestDigest.String() {
|
||||
return nil, false, fmt.Errorf("mismatching image manifest digest: got %q, expected %q", manifestDigest, ref)
|
||||
// verify against the remote digest, if available
|
||||
if remoteDigest != "" {
|
||||
if err := verifyDigest(remoteDigest, payload); err != nil {
|
||||
return "", nil, false, fmt.Errorf("verifying remote digest: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
var manifest registry.ManifestData
|
||||
if err := json.Unmarshal(payload, &manifest); err != nil {
|
||||
return nil, false, fmt.Errorf("error unmarshalling manifest: %s", err)
|
||||
return "", nil, false, fmt.Errorf("error unmarshalling manifest: %s", err)
|
||||
}
|
||||
if manifest.SchemaVersion != 1 {
|
||||
return nil, false, fmt.Errorf("unsupported schema version: %d", manifest.SchemaVersion)
|
||||
|
||||
// validate the contents of the manifest
|
||||
if err := validateManifest(&manifest); err != nil {
|
||||
return "", nil, false, err
|
||||
}
|
||||
|
||||
var verified bool
|
||||
verified, err = s.verifyTrustedKeys(manifest.Name, keys)
|
||||
if err != nil {
|
||||
return "", nil, false, fmt.Errorf("error verifying trusted keys: %v", err)
|
||||
}
|
||||
|
||||
return localDigest, &manifest, verified, nil
|
||||
}
|
||||
|
||||
// unpackSignedManifest takes the raw, signed manifest bytes, unpacks the jws
|
||||
// and returns the payload and public keys used to signed the manifest.
|
||||
// Signatures are verified for authenticity but not against the trust store.
|
||||
func unpackSignedManifest(p []byte) ([]byte, []libtrust.PublicKey, error) {
|
||||
sig, err := libtrust.ParsePrettySignature(p, "signatures")
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error parsing payload: %s", err)
|
||||
}
|
||||
|
||||
keys, err := sig.Verify()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error verifying payload: %s", err)
|
||||
}
|
||||
|
||||
payload, err := sig.Payload()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error retrieving payload: %s", err)
|
||||
}
|
||||
|
||||
return payload, keys, nil
|
||||
}
|
||||
|
||||
// verifyTrustedKeys checks the keys provided against the trust store,
|
||||
// ensuring that the provided keys are trusted for the namespace. The keys
|
||||
// provided from this method must come from the signatures provided as part of
|
||||
// the manifest JWS package, obtained from unpackSignedManifest or libtrust.
|
||||
func (s *TagStore) verifyTrustedKeys(namespace string, keys []libtrust.PublicKey) (verified bool, err error) {
|
||||
if namespace[0] != '/' {
|
||||
namespace = "/" + namespace
|
||||
}
|
||||
|
||||
for _, key := range keys {
|
||||
namespace := manifest.Name
|
||||
if namespace[0] != '/' {
|
||||
namespace = "/" + namespace
|
||||
}
|
||||
b, err := key.MarshalJSON()
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("error marshalling public key: %s", err)
|
||||
return false, fmt.Errorf("error marshalling public key: %s", err)
|
||||
}
|
||||
// Check key has read/write permission (0x03)
|
||||
v, err := s.trustService.CheckKey(namespace, b, 0x03)
|
||||
if err != nil {
|
||||
vErr, ok := err.(trust.NotVerifiedError)
|
||||
if !ok {
|
||||
return nil, false, fmt.Errorf("error running key check: %s", err)
|
||||
return false, fmt.Errorf("error running key check: %s", err)
|
||||
}
|
||||
logrus.Debugf("Key check result: %v", vErr)
|
||||
}
|
||||
verified = v
|
||||
if verified {
|
||||
logrus.Debug("Key check result: verified")
|
||||
}
|
||||
}
|
||||
return &manifest, verified, nil
|
||||
|
||||
if verified {
|
||||
logrus.Debug("Key check result: verified")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func checkValidManifest(manifest *registry.ManifestData) error {
|
||||
// verifyDigest checks the contents of p against the provided digest. Note
|
||||
// that for manifests, this is the signed payload and not the raw bytes with
|
||||
// signatures.
|
||||
func verifyDigest(dgst digest.Digest, p []byte) error {
|
||||
if err := dgst.Validate(); err != nil {
|
||||
return fmt.Errorf("error validating digest %q: %v", dgst, err)
|
||||
}
|
||||
|
||||
verifier, err := digest.NewDigestVerifier(dgst)
|
||||
if err != nil {
|
||||
// There are not many ways this can go wrong: if it does, its
|
||||
// fatal. Likley, the cause would be poor validation of the
|
||||
// incoming reference.
|
||||
return fmt.Errorf("error creating verifier for digest %q: %v", dgst, err)
|
||||
}
|
||||
|
||||
if _, err := verifier.Write(p); err != nil {
|
||||
return fmt.Errorf("error writing payload to digest verifier (verifier target %q): %v", dgst, err)
|
||||
}
|
||||
|
||||
if !verifier.Verified() {
|
||||
return fmt.Errorf("verification against digest %q failed", dgst)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateManifest(manifest *registry.ManifestData) error {
|
||||
if manifest.SchemaVersion != 1 {
|
||||
return fmt.Errorf("unsupported schema version: %d", manifest.SchemaVersion)
|
||||
}
|
||||
|
||||
if len(manifest.FSLayers) != len(manifest.History) {
|
||||
return fmt.Errorf("length of history not equal to number of layers")
|
||||
}
|
||||
|
||||
@@ -8,11 +8,13 @@ import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/pkg/tarsum"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/docker/docker/runconfig"
|
||||
"github.com/docker/docker/utils"
|
||||
"github.com/docker/libtrust"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -181,3 +183,121 @@ func TestManifestTarsumCache(t *testing.T) {
|
||||
t.Fatalf("Unexpected json value\nExpected:\n%s\nActual:\n%s", v1compat, manifest.History[0].V1Compatibility)
|
||||
}
|
||||
}
|
||||
|
||||
// TestManifestDigestCheck ensures that loadManifest properly verifies the
|
||||
// remote and local digest.
|
||||
func TestManifestDigestCheck(t *testing.T) {
|
||||
tmp, err := utils.TestDirectory("")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmp)
|
||||
store := mkTestTagStore(tmp, t)
|
||||
defer store.graph.driver.Cleanup()
|
||||
|
||||
archive, err := fakeTar()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
img := &image.Image{ID: testManifestImageID}
|
||||
if err := store.graph.Register(img, archive); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.Tag(testManifestImageName, testManifestTag, testManifestImageID, false); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if cs, err := img.GetCheckSum(store.graph.ImageRoot(testManifestImageID)); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if cs != "" {
|
||||
t.Fatalf("Non-empty checksum file after register")
|
||||
}
|
||||
|
||||
// Generate manifest
|
||||
payload, err := store.newManifest(testManifestImageName, testManifestImageName, testManifestTag)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error generating test manifest: %v", err)
|
||||
}
|
||||
|
||||
pk, err := libtrust.GenerateECP256PrivateKey()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error generating private key: %v", err)
|
||||
}
|
||||
|
||||
sig, err := libtrust.NewJSONSignature(payload)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating signature: %v", err)
|
||||
}
|
||||
|
||||
if err := sig.Sign(pk); err != nil {
|
||||
t.Fatalf("error signing manifest bytes: %v", err)
|
||||
}
|
||||
|
||||
signedBytes, err := sig.PrettySignature("signatures")
|
||||
if err != nil {
|
||||
t.Fatalf("error getting signed bytes: %v", err)
|
||||
}
|
||||
|
||||
dgst, err := digest.FromBytes(payload)
|
||||
if err != nil {
|
||||
t.Fatalf("error getting digest of manifest: %v", err)
|
||||
}
|
||||
|
||||
// use this as the "bad" digest
|
||||
zeroDigest, err := digest.FromBytes([]byte{})
|
||||
if err != nil {
|
||||
t.Fatalf("error making zero digest: %v", err)
|
||||
}
|
||||
|
||||
// Remote and local match, everything should look good
|
||||
local, _, _, err := store.loadManifest(signedBytes, dgst.String(), dgst)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error verifying local and remote digest: %v", err)
|
||||
}
|
||||
|
||||
if local != dgst {
|
||||
t.Fatalf("local digest not correctly calculated: %v", err)
|
||||
}
|
||||
|
||||
// remote and no local, since pulling by tag
|
||||
local, _, _, err = store.loadManifest(signedBytes, "tag", dgst)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error verifying tag pull and remote digest: %v", err)
|
||||
}
|
||||
|
||||
if local != dgst {
|
||||
t.Fatalf("local digest not correctly calculated: %v", err)
|
||||
}
|
||||
|
||||
// remote and differing local, this is the most important to fail
|
||||
local, _, _, err = store.loadManifest(signedBytes, zeroDigest.String(), dgst)
|
||||
if err == nil {
|
||||
t.Fatalf("error expected when verifying with differing local digest")
|
||||
}
|
||||
|
||||
// no remote, no local (by tag)
|
||||
local, _, _, err = store.loadManifest(signedBytes, "tag", "")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error verifying manifest without remote digest: %v", err)
|
||||
}
|
||||
|
||||
if local != dgst {
|
||||
t.Fatalf("local digest not correctly calculated: %v", err)
|
||||
}
|
||||
|
||||
// no remote, with local
|
||||
local, _, _, err = store.loadManifest(signedBytes, dgst.String(), "")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error verifying manifest without remote digest: %v", err)
|
||||
}
|
||||
|
||||
if local != dgst {
|
||||
t.Fatalf("local digest not correctly calculated: %v", err)
|
||||
}
|
||||
|
||||
// bad remote, we fail the check.
|
||||
local, _, _, err = store.loadManifest(signedBytes, dgst.String(), zeroDigest)
|
||||
if err == nil {
|
||||
t.Fatalf("error expected when verifying with differing remote digest")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -142,7 +142,6 @@ func makeMirrorRepoInfo(repoInfo *registry.RepositoryInfo, mirror string) *regis
|
||||
|
||||
func configureV2Mirror(repoInfo *registry.RepositoryInfo, s *registry.Service) (*registry.Endpoint, *registry.RepositoryInfo, error) {
|
||||
mirrors := repoInfo.Index.Mirrors
|
||||
|
||||
if len(mirrors) == 0 {
|
||||
// no mirrors configured
|
||||
return nil, nil, nil
|
||||
@@ -151,13 +150,11 @@ func configureV2Mirror(repoInfo *registry.RepositoryInfo, s *registry.Service) (
|
||||
v1MirrorCount := 0
|
||||
var v2MirrorEndpoint *registry.Endpoint
|
||||
var v2MirrorRepoInfo *registry.RepositoryInfo
|
||||
var lastErr error
|
||||
for _, mirror := range mirrors {
|
||||
mirrorRepoInfo := makeMirrorRepoInfo(repoInfo, mirror)
|
||||
endpoint, err := registry.NewEndpoint(mirrorRepoInfo.Index, nil)
|
||||
if err != nil {
|
||||
logrus.Errorf("Unable to create endpoint for %s: %s", mirror, err)
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
if endpoint.Version == 2 {
|
||||
@@ -182,9 +179,11 @@ func configureV2Mirror(repoInfo *registry.RepositoryInfo, s *registry.Service) (
|
||||
return v2MirrorEndpoint, v2MirrorRepoInfo, nil
|
||||
}
|
||||
if v2MirrorEndpoint != nil && v1MirrorCount > 0 {
|
||||
lastErr = fmt.Errorf("v1 and v2 mirrors configured")
|
||||
return nil, nil, fmt.Errorf("v1 and v2 mirrors configured")
|
||||
}
|
||||
return nil, nil, lastErr
|
||||
// No endpoint could be established with the given mirror configurations
|
||||
// Fallback to pulling from the hub as per v1 behavior.
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
func (s *TagStore) pullFromV2Mirror(mirrorEndpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo,
|
||||
@@ -458,17 +457,6 @@ func WriteStatus(requestedTag string, out io.Writer, sf *streamformatter.StreamF
|
||||
}
|
||||
}
|
||||
|
||||
// downloadInfo is used to pass information from download to extractor
|
||||
type downloadInfo struct {
|
||||
imgJSON []byte
|
||||
img *image.Image
|
||||
digest digest.Digest
|
||||
tmpFile *os.File
|
||||
length int64
|
||||
downloaded bool
|
||||
err chan error
|
||||
}
|
||||
|
||||
func (s *TagStore) pullV2Repository(r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter) error {
|
||||
endpoint, err := r.V2RegistryEndpoint(repoInfo.Index)
|
||||
if err != nil {
|
||||
@@ -518,27 +506,34 @@ func (s *TagStore) pullV2Repository(r *registry.Session, out io.Writer, repoInfo
|
||||
func (s *TagStore) pullV2Tag(r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter, auth *registry.RequestAuthorization) (bool, error) {
|
||||
logrus.Debugf("Pulling tag from V2 registry: %q", tag)
|
||||
|
||||
manifestBytes, manifestDigest, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth)
|
||||
remoteDigest, manifestBytes, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// loadManifest ensures that the manifest payload has the expected digest
|
||||
// if the tag is a digest reference.
|
||||
manifest, verified, err := s.loadManifest(manifestBytes, manifestDigest, tag)
|
||||
localDigest, manifest, verified, err := s.loadManifest(manifestBytes, tag, remoteDigest)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error verifying manifest: %s", err)
|
||||
}
|
||||
|
||||
if err := checkValidManifest(manifest); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if verified {
|
||||
logrus.Printf("Image manifest for %s has been verified", utils.ImageReference(repoInfo.CanonicalName, tag))
|
||||
}
|
||||
out.Write(sf.FormatStatus(tag, "Pulling from %s", repoInfo.CanonicalName))
|
||||
|
||||
// downloadInfo is used to pass information from download to extractor
|
||||
type downloadInfo struct {
|
||||
imgJSON []byte
|
||||
img *image.Image
|
||||
digest digest.Digest
|
||||
tmpFile *os.File
|
||||
length int64
|
||||
downloaded bool
|
||||
err chan error
|
||||
}
|
||||
|
||||
downloads := make([]downloadInfo, len(manifest.FSLayers))
|
||||
|
||||
for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
|
||||
@@ -611,8 +606,7 @@ func (s *TagStore) pullV2Tag(r *registry.Session, out io.Writer, endpoint *regis
|
||||
out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Verifying Checksum", nil))
|
||||
|
||||
if !verifier.Verified() {
|
||||
logrus.Infof("Image verification failed: checksum mismatch for %q", di.digest.String())
|
||||
verified = false
|
||||
return fmt.Errorf("image layer digest verification failed for %q", di.digest)
|
||||
}
|
||||
|
||||
out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
|
||||
@@ -689,15 +683,33 @@ func (s *TagStore) pullV2Tag(r *registry.Session, out io.Writer, endpoint *regis
|
||||
out.Write(sf.FormatStatus(utils.ImageReference(repoInfo.CanonicalName, tag), "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
|
||||
}
|
||||
|
||||
if manifestDigest != "" {
|
||||
out.Write(sf.FormatStatus("", "Digest: %s", manifestDigest))
|
||||
if localDigest != remoteDigest { // this is not a verification check.
|
||||
// NOTE(stevvooe): This is a very defensive branch and should never
|
||||
// happen, since all manifest digest implementations use the same
|
||||
// algorithm.
|
||||
logrus.WithFields(
|
||||
logrus.Fields{
|
||||
"local": localDigest,
|
||||
"remote": remoteDigest,
|
||||
}).Debugf("local digest does not match remote")
|
||||
|
||||
out.Write(sf.FormatStatus("", "Remote Digest: %s", remoteDigest))
|
||||
}
|
||||
|
||||
if utils.DigestReference(tag) {
|
||||
if err = s.SetDigest(repoInfo.LocalName, tag, downloads[0].img.ID); err != nil {
|
||||
out.Write(sf.FormatStatus("", "Digest: %s", localDigest))
|
||||
|
||||
if tag == localDigest.String() {
|
||||
// TODO(stevvooe): Ideally, we should always set the digest so we can
|
||||
// use the digest whether we pull by it or not. Unfortunately, the tag
|
||||
// store treats the digest as a separate tag, meaning there may be an
|
||||
// untagged digest image that would seem to be dangling by a user.
|
||||
|
||||
if err = s.SetDigest(repoInfo.LocalName, localDigest.String(), downloads[0].img.ID); err != nil {
|
||||
return false, err
|
||||
}
|
||||
} else {
|
||||
}
|
||||
|
||||
if !utils.DigestReference(tag) {
|
||||
// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
|
||||
if err = s.Tag(repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
|
||||
return false, err
|
||||
|
||||
@@ -413,7 +413,7 @@ func (s *TagStore) pushV2Repository(r *registry.Session, localRepo Repository, o
|
||||
m.History[i] = ®istry.ManifestHistory{V1Compatibility: string(jsonData)}
|
||||
}
|
||||
|
||||
if err := checkValidManifest(m); err != nil {
|
||||
if err := validateManifest(m); err != nil {
|
||||
return fmt.Errorf("invalid manifest: %s", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
_ "github.com/docker/docker/daemon/graphdriver/vfs" // import the vfs driver so it is used in the tests
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/trust"
|
||||
"github.com/docker/docker/utils"
|
||||
)
|
||||
|
||||
@@ -60,9 +61,16 @@ func mkTestTagStore(root string, t *testing.T) *TagStore {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
trust, err := trust.NewTrustStore(root + "/trust")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tagCfg := &TagStoreConfig{
|
||||
Graph: graph,
|
||||
Events: events.New(),
|
||||
Trust: trust,
|
||||
}
|
||||
store, err := NewTagStore(path.Join(root, "tags"), tagCfg)
|
||||
if err != nil {
|
||||
|
||||
24
hack/make.sh
24
hack/make.sh
@@ -96,7 +96,6 @@ fi
|
||||
if [ "$DOCKER_EXPERIMENTAL" ]; then
|
||||
echo >&2 '# WARNING! DOCKER_EXPERIMENTAL is set: building experimental features'
|
||||
echo >&2
|
||||
VERSION+="-experimental"
|
||||
DOCKER_BUILDTAGS+=" experimental"
|
||||
fi
|
||||
|
||||
@@ -198,13 +197,13 @@ go_test_dir() {
|
||||
# if our current go install has -cover, we want to use it :)
|
||||
mkdir -p "$DEST/coverprofiles"
|
||||
coverprofile="docker${dir#.}"
|
||||
coverprofile="$DEST/coverprofiles/${coverprofile//\//-}"
|
||||
coverprofile="$ABS_DEST/coverprofiles/${coverprofile//\//-}"
|
||||
testcover=( -cover -coverprofile "$coverprofile" $coverpkg )
|
||||
fi
|
||||
(
|
||||
export DEST
|
||||
echo '+ go test' $TESTFLAGS "${DOCKER_PKG}${dir#.}"
|
||||
cd "$dir"
|
||||
export DEST="$ABS_DEST" # we're in a subshell, so this is safe -- our integration-cli tests need DEST, and "cd" screws it up
|
||||
test_env go test ${testcover[@]} -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS
|
||||
)
|
||||
}
|
||||
@@ -217,7 +216,7 @@ test_env() {
|
||||
DOCKER_USERLANDPROXY="$DOCKER_USERLANDPROXY" \
|
||||
DOCKER_HOST="$DOCKER_HOST" \
|
||||
GOPATH="$GOPATH" \
|
||||
HOME="$DEST/fake-HOME" \
|
||||
HOME="$ABS_DEST/fake-HOME" \
|
||||
PATH="$PATH" \
|
||||
TEST_DOCKERINIT_PATH="$TEST_DOCKERINIT_PATH" \
|
||||
"$@"
|
||||
@@ -271,11 +270,9 @@ hash_files() {
|
||||
}
|
||||
|
||||
bundle() {
|
||||
bundlescript=$1
|
||||
bundle=$(basename $bundlescript)
|
||||
echo "---> Making bundle: $bundle (in bundles/$VERSION/$bundle)"
|
||||
mkdir -p "bundles/$VERSION/$bundle"
|
||||
source "$bundlescript" "$(pwd)/bundles/$VERSION/$bundle"
|
||||
local bundle="$1"; shift
|
||||
echo "---> Making bundle: $(basename "$bundle") (in $DEST)"
|
||||
source "$SCRIPTDIR/make/$bundle" "$@"
|
||||
}
|
||||
|
||||
main() {
|
||||
@@ -301,7 +298,14 @@ main() {
|
||||
bundles=($@)
|
||||
fi
|
||||
for bundle in ${bundles[@]}; do
|
||||
bundle "$SCRIPTDIR/make/$bundle"
|
||||
export DEST="bundles/$VERSION/$(basename "$bundle")"
|
||||
# Cygdrive paths don't play well with go build -o.
|
||||
if [[ "$(uname -s)" == CYGWIN* ]]; then
|
||||
export DEST="$(cygpath -mw "$DEST")"
|
||||
fi
|
||||
mkdir -p "$DEST"
|
||||
ABS_DEST="$(cd "$DEST" && pwd -P)"
|
||||
bundle "$bundle"
|
||||
echo
|
||||
done
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
Source: docker-engine
|
||||
Maintainer: Docker <support@docker.com>
|
||||
Homepage: https://dockerproject.com
|
||||
Homepage: https://dockerproject.org
|
||||
Vcs-Browser: https://github.com/docker/docker
|
||||
Vcs-Git: git://github.com/docker/docker.git
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ Summary: The open-source application container engine
|
||||
License: ASL 2.0
|
||||
Source: %{name}.tar.gz
|
||||
|
||||
URL: https://dockerproject.com
|
||||
URL: https://dockerproject.org
|
||||
Vendor: Docker
|
||||
Packager: Docker <support@docker.com>
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
# see test-integration-cli for example usage of this script
|
||||
|
||||
export PATH="$DEST/../binary:$DEST/../dynbinary:$DEST/../gccgo:$DEST/../dyngccgo:$PATH"
|
||||
export PATH="$ABS_DEST/../binary:$ABS_DEST/../dynbinary:$ABS_DEST/../gccgo:$ABS_DEST/../dyngccgo:$PATH"
|
||||
|
||||
if ! command -v docker &> /dev/null; then
|
||||
echo >&2 'error: binary or dynbinary must be run before .integration-daemon-start'
|
||||
|
||||
@@ -1,16 +1,10 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
DEST=$1
|
||||
BINARY_NAME="docker-$VERSION"
|
||||
BINARY_EXTENSION="$(binary_extension)"
|
||||
BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION"
|
||||
|
||||
# Cygdrive paths don't play well with go build -o.
|
||||
if [[ "$(uname -s)" == CYGWIN* ]]; then
|
||||
DEST=$(cygpath -mw $DEST)
|
||||
fi
|
||||
|
||||
source "${MAKEDIR}/.go-autogen"
|
||||
|
||||
echo "Building: $DEST/$BINARY_FULLNAME"
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
DEST=$1
|
||||
|
||||
# subshell so that we can export PATH without breaking other things
|
||||
# subshell so that we can export PATH and TZ without breaking other things
|
||||
(
|
||||
export TZ=UTC # make sure our "date" variables are UTC-based
|
||||
|
||||
source "${MAKEDIR}/.integration-daemon-start"
|
||||
|
||||
# TODO consider using frozen images for the dockercore/builder-deb tags
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
DEST=$1
|
||||
|
||||
# subshell so that we can export PATH without breaking other things
|
||||
# subshell so that we can export PATH and TZ without breaking other things
|
||||
(
|
||||
export TZ=UTC # make sure our "date" variables are UTC-based
|
||||
|
||||
source "$(dirname "$BASH_SOURCE")/.integration-daemon-start"
|
||||
|
||||
# TODO consider using frozen images for the dockercore/builder-rpm tags
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
DEST="$1"
|
||||
|
||||
bundle_cover() {
|
||||
coverprofiles=( "$DEST/../"*"/coverprofiles/"* )
|
||||
for p in "${coverprofiles[@]}"; do
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
DEST=$1
|
||||
|
||||
# explicit list of os/arch combos that support being a daemon
|
||||
declare -A daemonSupporting
|
||||
daemonSupporting=(
|
||||
@@ -21,13 +19,15 @@ fi
|
||||
|
||||
for platform in $DOCKER_CROSSPLATFORMS; do
|
||||
(
|
||||
mkdir -p "$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION
|
||||
export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION
|
||||
mkdir -p "$DEST"
|
||||
ABS_DEST="$(cd "$DEST" && pwd -P)"
|
||||
export GOOS=${platform%/*}
|
||||
export GOARCH=${platform##*/}
|
||||
if [ -z "${daemonSupporting[$platform]}" ]; then
|
||||
export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms
|
||||
export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) # remove the "daemon" build tag from platforms that aren't supported
|
||||
fi
|
||||
source "${MAKEDIR}/binary" "$DEST/$platform"
|
||||
source "${MAKEDIR}/binary"
|
||||
)
|
||||
done
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
DEST=$1
|
||||
|
||||
if [ -z "$DOCKER_CLIENTONLY" ]; then
|
||||
source "${MAKEDIR}/.dockerinit"
|
||||
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
DEST=$1
|
||||
|
||||
if [ -z "$DOCKER_CLIENTONLY" ]; then
|
||||
source "${MAKEDIR}/.dockerinit-gccgo"
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
DEST=$1
|
||||
BINARY_NAME="docker-$VERSION"
|
||||
BINARY_EXTENSION="$(binary_extension)"
|
||||
BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION"
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
DEST=$1
|
||||
|
||||
# subshell so that we can export PATH without breaking other things
|
||||
(
|
||||
source "${MAKEDIR}/.integration-daemon-start"
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
DEST=$1
|
||||
|
||||
bundle_test_integration_cli() {
|
||||
go_test_dir ./integration-cli
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
DEST=$1
|
||||
: ${PARALLEL_JOBS:=$(nproc 2>/dev/null || echo 1)} # if nproc fails (usually because we don't have it), let's not parallelize by default
|
||||
|
||||
RED=$'\033[31m'
|
||||
@@ -26,10 +25,9 @@ bundle_test_unit() {
|
||||
export LDFLAGS
|
||||
export TESTFLAGS
|
||||
export HAVE_GO_TEST_COVER
|
||||
export DEST
|
||||
|
||||
# some hack to export array variables
|
||||
export BUILDFLAGS_FILE="buildflags_file"
|
||||
export BUILDFLAGS_FILE="$DEST/buildflags-file"
|
||||
( IFS=$'\n'; echo "${BUILDFLAGS[*]}" ) > "$BUILDFLAGS_FILE"
|
||||
|
||||
if command -v parallel &> /dev/null; then
|
||||
@@ -59,7 +57,7 @@ go_run_test_dir() {
|
||||
while read dir; do
|
||||
echo
|
||||
echo '+ go test' $TESTFLAGS "${DOCKER_PKG}${dir#.}"
|
||||
precompiled="$DEST/precompiled/$dir.test$(binary_extension)"
|
||||
precompiled="$ABS_DEST/precompiled/$dir.test$(binary_extension)"
|
||||
if ! ( cd "$dir" && test_env "$precompiled" $TESTFLAGS ); then
|
||||
TESTS_FAILED+=("$dir")
|
||||
echo
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
#!/bin/bash
|
||||
|
||||
DEST="$1"
|
||||
CROSS="$DEST/../cross"
|
||||
|
||||
set -e
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
#!/bin/bash
|
||||
|
||||
DEST=$1
|
||||
|
||||
PKGVERSION="${VERSION//-/'~'}"
|
||||
# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better
|
||||
if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then
|
||||
@@ -37,7 +35,7 @@ PACKAGE_LICENSE="Apache-2.0"
|
||||
# Build docker as an ubuntu package using FPM and REPREPRO (sue me).
|
||||
# bundle_binary must be called first.
|
||||
bundle_ubuntu() {
|
||||
DIR=$DEST/build
|
||||
DIR="$ABS_DEST/build"
|
||||
|
||||
# Include our udev rules
|
||||
mkdir -p "$DIR/etc/udev/rules.d"
|
||||
@@ -140,9 +138,9 @@ EOF
|
||||
# create lxc-docker-VERSION package
|
||||
fpm -s dir -C "$DIR" \
|
||||
--name "lxc-docker-$VERSION" --version "$PKGVERSION" \
|
||||
--after-install "$DEST/postinst" \
|
||||
--before-remove "$DEST/prerm" \
|
||||
--after-remove "$DEST/postrm" \
|
||||
--after-install "$ABS_DEST/postinst" \
|
||||
--before-remove "$ABS_DEST/prerm" \
|
||||
--after-remove "$ABS_DEST/postrm" \
|
||||
--architecture "$PACKAGE_ARCHITECTURE" \
|
||||
--prefix / \
|
||||
--depends iptables \
|
||||
|
||||
@@ -266,7 +266,7 @@ Architectures: amd64 i386
|
||||
EOF
|
||||
|
||||
# Add the DEB package to the APT repo
|
||||
DEBFILE=bundles/$VERSION/ubuntu/lxc-docker*.deb
|
||||
DEBFILE=( bundles/$VERSION/ubuntu/lxc-docker*.deb )
|
||||
reprepro -b "$APTDIR" includedeb docker "$DEBFILE"
|
||||
|
||||
# Sign
|
||||
|
||||
@@ -55,12 +55,12 @@ clone hg code.google.com/p/go.net 84a4013f96e0
|
||||
clone hg code.google.com/p/gosqlite 74691fb6f837
|
||||
|
||||
#get libnetwork packages
|
||||
clone git github.com/docker/libnetwork 2da2dc055de5a474c8540871ad88a48213b0994f
|
||||
clone git github.com/docker/libnetwork f72ad20491e8c46d9664da3f32a0eddb301e7c8d
|
||||
clone git github.com/vishvananda/netns 008d17ae001344769b031375bdb38a86219154c6
|
||||
clone git github.com/vishvananda/netlink 8eb64238879fed52fd51c5b30ad20b928fb4c36c
|
||||
|
||||
# get distribution packages
|
||||
clone git github.com/docker/distribution d957768537c5af40e4f4cd96871f7b2bde9e2923
|
||||
clone git github.com/docker/distribution b9eeb328080d367dbde850ec6e94f1e4ac2b5efe
|
||||
mv src/github.com/docker/distribution/digest tmp-digest
|
||||
mv src/github.com/docker/distribution/registry/api tmp-api
|
||||
rm -rf src/github.com/docker/distribution
|
||||
|
||||
@@ -51,6 +51,39 @@ func (s *DockerSuite) TestContainerApiGetAll(c *check.C) {
|
||||
}
|
||||
}
|
||||
|
||||
// regression test for empty json field being omitted #13691
|
||||
func (s *DockerSuite) TestContainerApiGetJSONNoFieldsOmitted(c *check.C) {
|
||||
runCmd := exec.Command(dockerBinary, "run", "busybox", "true")
|
||||
_, err := runCommand(runCmd)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
status, body, err := sockRequest("GET", "/containers/json?all=1", nil)
|
||||
c.Assert(status, check.Equals, http.StatusOK)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// empty Labels field triggered this bug, make sense to check for everything
|
||||
// cause even Ports for instance can trigger this bug
|
||||
// better safe than sorry..
|
||||
fields := []string{
|
||||
"Id",
|
||||
"Names",
|
||||
"Image",
|
||||
"Command",
|
||||
"Created",
|
||||
"Ports",
|
||||
"Labels",
|
||||
"Status",
|
||||
}
|
||||
|
||||
// decoding into types.Container do not work since it eventually unmarshal
|
||||
// and empty field to an empty go map, so we just check for a string
|
||||
for _, f := range fields {
|
||||
if !strings.Contains(string(body), f) {
|
||||
c.Fatalf("Field %s is missing and it shouldn't", f)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestContainerApiGetExport(c *check.C) {
|
||||
name := "exportcontainer"
|
||||
runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "touch", "/test")
|
||||
@@ -254,7 +287,7 @@ func (s *DockerSuite) TestGetContainerStats(c *check.C) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestContainerStatsRmRunning(c *check.C) {
|
||||
func (s *DockerSuite) TestGetContainerStatsRmRunning(c *check.C) {
|
||||
out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
|
||||
id := strings.TrimSpace(out)
|
||||
|
||||
@@ -290,6 +323,89 @@ func (s *DockerSuite) TestContainerStatsRmRunning(c *check.C) {
|
||||
c.Assert(err, check.Not(check.IsNil))
|
||||
}
|
||||
|
||||
// regression test for gh13421
|
||||
// previous test was just checking one stat entry so it didn't fail (stats with
|
||||
// stream false always return one stat)
|
||||
func (s *DockerSuite) TestGetContainerStatsStream(c *check.C) {
|
||||
name := "statscontainer"
|
||||
runCmd := exec.Command(dockerBinary, "run", "-d", "--name", name, "busybox", "top")
|
||||
_, err := runCommand(runCmd)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
type b struct {
|
||||
status int
|
||||
body []byte
|
||||
err error
|
||||
}
|
||||
bc := make(chan b, 1)
|
||||
go func() {
|
||||
status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil)
|
||||
bc <- b{status, body, err}
|
||||
}()
|
||||
|
||||
// allow some time to stream the stats from the container
|
||||
time.Sleep(4 * time.Second)
|
||||
if _, err := runCommand(exec.Command(dockerBinary, "rm", "-f", name)); err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
|
||||
// collect the results from the stats stream or timeout and fail
|
||||
// if the stream was not disconnected.
|
||||
select {
|
||||
case <-time.After(2 * time.Second):
|
||||
c.Fatal("stream was not closed after container was removed")
|
||||
case sr := <-bc:
|
||||
c.Assert(sr.err, check.IsNil)
|
||||
c.Assert(sr.status, check.Equals, http.StatusOK)
|
||||
|
||||
s := string(sr.body)
|
||||
// count occurrences of "read" of types.Stats
|
||||
if l := strings.Count(s, "read"); l < 2 {
|
||||
c.Fatalf("Expected more than one stat streamed, got %d", l)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestGetContainerStatsNoStream(c *check.C) {
|
||||
name := "statscontainer"
|
||||
runCmd := exec.Command(dockerBinary, "run", "-d", "--name", name, "busybox", "top")
|
||||
_, err := runCommand(runCmd)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
type b struct {
|
||||
status int
|
||||
body []byte
|
||||
err error
|
||||
}
|
||||
bc := make(chan b, 1)
|
||||
go func() {
|
||||
status, body, err := sockRequest("GET", "/containers/"+name+"/stats?stream=0", nil)
|
||||
bc <- b{status, body, err}
|
||||
}()
|
||||
|
||||
// allow some time to stream the stats from the container
|
||||
time.Sleep(4 * time.Second)
|
||||
if _, err := runCommand(exec.Command(dockerBinary, "rm", "-f", name)); err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
|
||||
// collect the results from the stats stream or timeout and fail
|
||||
// if the stream was not disconnected.
|
||||
select {
|
||||
case <-time.After(2 * time.Second):
|
||||
c.Fatal("stream was not closed after container was removed")
|
||||
case sr := <-bc:
|
||||
c.Assert(sr.err, check.IsNil)
|
||||
c.Assert(sr.status, check.Equals, http.StatusOK)
|
||||
|
||||
s := string(sr.body)
|
||||
// count occurrences of "read" of types.Stats
|
||||
if l := strings.Count(s, "read"); l != 1 {
|
||||
c.Fatalf("Expected only one stat streamed, got %d", l)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestGetStoppedContainerStats(c *check.C) {
|
||||
// TODO: this test does nothing because we are c.Assert'ing in goroutine
|
||||
var (
|
||||
|
||||
48
integration-cli/docker_api_stats.go
Normal file
48
integration-cli/docker_api_stats.go
Normal file
@@ -0,0 +1,48 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/go-check/check"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func (s *DockerSuite) TestCliStatsNoStreamGetCpu(c *check.C) {
|
||||
out, _ := dockerCmd(c, "run", "-d", "--cpu-quota=2000", "busybox", "/bin/sh", "-c", "while true;do echo 'Hello';done")
|
||||
|
||||
id := strings.TrimSpace(out)
|
||||
if err := waitRun(id); err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
ch := make(chan error)
|
||||
var v *types.Stats
|
||||
go func() {
|
||||
_, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=1", id), nil, "")
|
||||
if err != nil {
|
||||
ch <- err
|
||||
}
|
||||
dec := json.NewDecoder(body)
|
||||
if err := dec.Decode(&v); err != nil {
|
||||
ch <- err
|
||||
}
|
||||
ch <- nil
|
||||
}()
|
||||
select {
|
||||
case e := <-ch:
|
||||
if e == nil {
|
||||
var cpuPercent = 0.0
|
||||
cpuDelta := float64(v.CpuStats.CpuUsage.TotalUsage - v.PreCpuStats.CpuUsage.TotalUsage)
|
||||
systemDelta := float64(v.CpuStats.SystemUsage - v.PreCpuStats.SystemUsage)
|
||||
cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CpuStats.CpuUsage.PercpuUsage)) * 100.0
|
||||
if cpuPercent < 1.8 || cpuPercent > 2.2 {
|
||||
c.Fatal("docker stats with no-stream get cpu usage failed")
|
||||
}
|
||||
|
||||
}
|
||||
case <-time.After(4 * time.Second):
|
||||
c.Fatal("docker stats with no-stream timeout")
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1165,3 +1165,19 @@ func (s *DockerDaemonSuite) TestRunContainerWithBridgeNone(c *check.C) {
|
||||
c.Assert(strings.Contains(out, "eth0"), check.Equals, false,
|
||||
check.Commentf("There shouldn't be eth0 in container when network is disabled: %s", out))
|
||||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestDaemonRestartWithContainerRunning(t *check.C) {
|
||||
if err := s.d.StartWithBusybox(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if out, err := s.d.Cmd("run", "-ti", "-d", "--name", "test", "busybox"); err != nil {
|
||||
t.Fatal(out, err)
|
||||
}
|
||||
if err := s.d.Restart(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Container 'test' should be removed without error
|
||||
if out, err := s.d.Cmd("rm", "test"); err != nil {
|
||||
t.Fatal(out, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,8 +17,13 @@ func (s *DockerSuite) TestExperimentalVersion(c *check.C) {
|
||||
}
|
||||
|
||||
for _, line := range strings.Split(out, "\n") {
|
||||
if strings.HasPrefix(line, "Client version:") || strings.HasPrefix(line, "Server version:") {
|
||||
c.Assert(line, check.Matches, "*-experimental")
|
||||
if strings.HasPrefix(line, "Experimental (client):") || strings.HasPrefix(line, "Experimental (server):") {
|
||||
c.Assert(line, check.Matches, "*true")
|
||||
}
|
||||
}
|
||||
|
||||
versionCmd = exec.Command(dockerBinary, "-v")
|
||||
if out, _, err = runCommandWithOutput(versionCmd); err != nil || !strings.Contains(out, ", experimental") {
|
||||
c.Fatalf("docker version did not contain experimental: %s, %v", out, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -93,6 +93,8 @@ func (s *DockerSuite) TestHelpTextVerify(c *check.C) {
|
||||
// Skip first line, its "Commands:"
|
||||
cmds := []string{}
|
||||
for _, cmd := range strings.Split(out[i:], "\n")[1:] {
|
||||
var stderr string
|
||||
|
||||
// Stop on blank line or non-idented line
|
||||
if cmd == "" || !unicode.IsSpace(rune(cmd[0])) {
|
||||
break
|
||||
@@ -102,12 +104,24 @@ func (s *DockerSuite) TestHelpTextVerify(c *check.C) {
|
||||
cmd = strings.Split(strings.TrimSpace(cmd), " ")[0]
|
||||
cmds = append(cmds, cmd)
|
||||
|
||||
// Check the full usage text
|
||||
helpCmd := exec.Command(dockerBinary, cmd, "--help")
|
||||
helpCmd.Env = newEnvs
|
||||
out, ec, err := runCommandWithOutput(helpCmd)
|
||||
out, stderr, ec, err = runCommandWithStdoutStderr(helpCmd)
|
||||
if len(stderr) != 0 {
|
||||
c.Fatalf("Error on %q help. non-empty stderr:%q", cmd, stderr)
|
||||
}
|
||||
if strings.HasSuffix(out, "\n\n") {
|
||||
c.Fatalf("Should not have blank line on %q\nout:%q", cmd, out)
|
||||
}
|
||||
if !strings.Contains(out, "--help=false") {
|
||||
c.Fatalf("Should show full usage on %q\nout:%q", cmd, out)
|
||||
}
|
||||
if err != nil || ec != 0 {
|
||||
c.Fatalf("Error on %q help: %s\nexit code:%d", cmd, out, ec)
|
||||
}
|
||||
|
||||
// Check each line for lots of stuff
|
||||
lines := strings.Split(out, "\n")
|
||||
for _, line := range lines {
|
||||
if len(line) > 80 {
|
||||
@@ -142,6 +156,77 @@ func (s *DockerSuite) TestHelpTextVerify(c *check.C) {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// For each command make sure we generate an error
|
||||
// if we give a bad arg
|
||||
dCmd := exec.Command(dockerBinary, cmd, "--badArg")
|
||||
out, stderr, ec, err = runCommandWithStdoutStderr(dCmd)
|
||||
if len(out) != 0 || len(stderr) == 0 || ec == 0 || err == nil {
|
||||
c.Fatalf("Bad results from 'docker %s --badArg'\nec:%d\nstdout:%s\nstderr:%s\nerr:%q", cmd, ec, out, stderr, err)
|
||||
}
|
||||
// Be really picky
|
||||
if strings.HasSuffix(stderr, "\n\n") {
|
||||
c.Fatalf("Should not have a blank line at the end of 'docker rm'\n%s", stderr)
|
||||
}
|
||||
|
||||
// Now make sure that each command will print a short-usage
|
||||
// (not a full usage - meaning no opts section) if we
|
||||
// are missing a required arg or pass in a bad arg
|
||||
|
||||
// These commands will never print a short-usage so don't test
|
||||
noShortUsage := map[string]string{
|
||||
"images": "",
|
||||
"login": "",
|
||||
"logout": "",
|
||||
}
|
||||
|
||||
if _, ok := noShortUsage[cmd]; !ok {
|
||||
// For each command run it w/o any args. It will either return
|
||||
// valid output or print a short-usage
|
||||
var dCmd *exec.Cmd
|
||||
var stdout, stderr string
|
||||
var args []string
|
||||
|
||||
// skipNoArgs are ones that we don't want to try w/o
|
||||
// any args. Either because it'll hang the test or
|
||||
// lead to incorrect test result (like false negative).
|
||||
// Whatever the reason, skip trying to run w/o args and
|
||||
// jump to trying with a bogus arg.
|
||||
skipNoArgs := map[string]string{
|
||||
"events": "",
|
||||
"load": "",
|
||||
}
|
||||
|
||||
ec = 0
|
||||
if _, ok := skipNoArgs[cmd]; !ok {
|
||||
args = []string{cmd}
|
||||
dCmd = exec.Command(dockerBinary, args...)
|
||||
stdout, stderr, ec, err = runCommandWithStdoutStderr(dCmd)
|
||||
}
|
||||
|
||||
// If its ok w/o any args then try again with an arg
|
||||
if ec == 0 {
|
||||
args = []string{cmd, "badArg"}
|
||||
dCmd = exec.Command(dockerBinary, args...)
|
||||
stdout, stderr, ec, err = runCommandWithStdoutStderr(dCmd)
|
||||
}
|
||||
|
||||
if len(stdout) != 0 || len(stderr) == 0 || ec == 0 || err == nil {
|
||||
c.Fatalf("Bad output from %q\nstdout:%q\nstderr:%q\nec:%d\nerr:%q", args, stdout, stderr, ec, err)
|
||||
}
|
||||
// Should have just short usage
|
||||
if !strings.Contains(stderr, "\nUsage: ") {
|
||||
c.Fatalf("Missing short usage on %q\nstderr:%q", args, stderr)
|
||||
}
|
||||
// But shouldn't have full usage
|
||||
if strings.Contains(stderr, "--help=false") {
|
||||
c.Fatalf("Should not have full usage on %q\nstderr:%q", args, stderr)
|
||||
}
|
||||
if strings.HasSuffix(stderr, "\n\n") {
|
||||
c.Fatalf("Should not have a blank line on %q\nstderr:%q", args, stderr)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
expected := 39
|
||||
@@ -153,31 +238,92 @@ func (s *DockerSuite) TestHelpTextVerify(c *check.C) {
|
||||
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestHelpErrorStderr(c *check.C) {
|
||||
// If we had a generic CLI test file this one shoudl go in there
|
||||
func (s *DockerSuite) TestHelpExitCodesHelpOutput(c *check.C) {
|
||||
// Test to make sure the exit code and output (stdout vs stderr) of
|
||||
// various good and bad cases are what we expect
|
||||
|
||||
cmd := exec.Command(dockerBinary, "boogie")
|
||||
out, ec, err := runCommandWithOutput(cmd)
|
||||
if err == nil || ec == 0 {
|
||||
c.Fatalf("Boogie command should have failed")
|
||||
// docker : stdout=all, stderr=empty, rc=0
|
||||
cmd := exec.Command(dockerBinary)
|
||||
stdout, stderr, ec, err := runCommandWithStdoutStderr(cmd)
|
||||
if len(stdout) == 0 || len(stderr) != 0 || ec != 0 || err != nil {
|
||||
c.Fatalf("Bad results from 'docker'\nec:%d\nstdout:%s\nstderr:%s\nerr:%q", ec, stdout, stderr, err)
|
||||
}
|
||||
// Be really pick
|
||||
if strings.HasSuffix(stdout, "\n\n") {
|
||||
c.Fatalf("Should not have a blank line at the end of 'docker'\n%s", stdout)
|
||||
}
|
||||
|
||||
expected := "docker: 'boogie' is not a docker command. See 'docker --help'.\n"
|
||||
if out != expected {
|
||||
c.Fatalf("Bad output from boogie\nGot:%s\nExpected:%s", out, expected)
|
||||
// docker help: stdout=all, stderr=empty, rc=0
|
||||
cmd = exec.Command(dockerBinary, "help")
|
||||
stdout, stderr, ec, err = runCommandWithStdoutStderr(cmd)
|
||||
if len(stdout) == 0 || len(stderr) != 0 || ec != 0 || err != nil {
|
||||
c.Fatalf("Bad results from 'docker help'\nec:%d\nstdout:%s\nstderr:%s\nerr:%q", ec, stdout, stderr, err)
|
||||
}
|
||||
// Be really pick
|
||||
if strings.HasSuffix(stdout, "\n\n") {
|
||||
c.Fatalf("Should not have a blank line at the end of 'docker help'\n%s", stdout)
|
||||
}
|
||||
|
||||
cmd = exec.Command(dockerBinary, "rename", "foo", "bar")
|
||||
out, ec, err = runCommandWithOutput(cmd)
|
||||
if err == nil || ec == 0 {
|
||||
c.Fatalf("Rename should have failed")
|
||||
// docker --help: stdout=all, stderr=empty, rc=0
|
||||
cmd = exec.Command(dockerBinary, "--help")
|
||||
stdout, stderr, ec, err = runCommandWithStdoutStderr(cmd)
|
||||
if len(stdout) == 0 || len(stderr) != 0 || ec != 0 || err != nil {
|
||||
c.Fatalf("Bad results from 'docker --help'\nec:%d\nstdout:%s\nstderr:%s\nerr:%q", ec, stdout, stderr, err)
|
||||
}
|
||||
// Be really pick
|
||||
if strings.HasSuffix(stdout, "\n\n") {
|
||||
c.Fatalf("Should not have a blank line at the end of 'docker --help'\n%s", stdout)
|
||||
}
|
||||
|
||||
expected = `Error response from daemon: no such id: foo
|
||||
Error: failed to rename container named foo
|
||||
`
|
||||
if out != expected {
|
||||
c.Fatalf("Bad output from rename\nGot:%s\nExpected:%s", out, expected)
|
||||
// docker inspect busybox: stdout=all, stderr=empty, rc=0
|
||||
// Just making sure stderr is empty on valid cmd
|
||||
cmd = exec.Command(dockerBinary, "inspect", "busybox")
|
||||
stdout, stderr, ec, err = runCommandWithStdoutStderr(cmd)
|
||||
if len(stdout) == 0 || len(stderr) != 0 || ec != 0 || err != nil {
|
||||
c.Fatalf("Bad results from 'docker inspect busybox'\nec:%d\nstdout:%s\nstderr:%s\nerr:%q", ec, stdout, stderr, err)
|
||||
}
|
||||
// Be really pick
|
||||
if strings.HasSuffix(stdout, "\n\n") {
|
||||
c.Fatalf("Should not have a blank line at the end of 'docker inspect busyBox'\n%s", stdout)
|
||||
}
|
||||
|
||||
// docker rm: stdout=empty, stderr=all, rc!=0
|
||||
// testing the min arg error msg
|
||||
cmd = exec.Command(dockerBinary, "rm")
|
||||
stdout, stderr, ec, err = runCommandWithStdoutStderr(cmd)
|
||||
if len(stdout) != 0 || len(stderr) == 0 || ec == 0 || err == nil {
|
||||
c.Fatalf("Bad results from 'docker rm'\nec:%d\nstdout:%s\nstderr:%s\nerr:%q", ec, stdout, stderr, err)
|
||||
}
|
||||
// Should not contain full help text but should contain info about
|
||||
// # of args and Usage line
|
||||
if !strings.Contains(stderr, "requires a minimum") {
|
||||
c.Fatalf("Missing # of args text from 'docker rm'\nstderr:%s", stderr)
|
||||
}
|
||||
|
||||
// docker rm NoSuchContainer: stdout=empty, stderr=all, rc=0
|
||||
// testing to make sure no blank line on error
|
||||
cmd = exec.Command(dockerBinary, "rm", "NoSuchContainer")
|
||||
stdout, stderr, ec, err = runCommandWithStdoutStderr(cmd)
|
||||
if len(stdout) != 0 || len(stderr) == 0 || ec == 0 || err == nil {
|
||||
c.Fatalf("Bad results from 'docker rm NoSuchContainer'\nec:%d\nstdout:%s\nstderr:%s\nerr:%q", ec, stdout, stderr, err)
|
||||
}
|
||||
// Be really picky
|
||||
if strings.HasSuffix(stderr, "\n\n") {
|
||||
c.Fatalf("Should not have a blank line at the end of 'docker rm'\n%s", stderr)
|
||||
}
|
||||
|
||||
// docker BadCmd: stdout=empty, stderr=all, rc=0
|
||||
cmd = exec.Command(dockerBinary, "BadCmd")
|
||||
stdout, stderr, ec, err = runCommandWithStdoutStderr(cmd)
|
||||
if len(stdout) != 0 || len(stderr) == 0 || ec == 0 || err == nil {
|
||||
c.Fatalf("Bad results from 'docker BadCmd'\nec:%d\nstdout:%s\nstderr:%s\nerr:%q", ec, stdout, stderr, err)
|
||||
}
|
||||
if stderr != "docker: 'BadCmd' is not a docker command.\nSee 'docker --help'.\n" {
|
||||
c.Fatalf("Unexcepted output for 'docker badCmd'\nstderr:%s", stderr)
|
||||
}
|
||||
// Be really picky
|
||||
if strings.HasSuffix(stderr, "\n\n") {
|
||||
c.Fatalf("Should not have a blank line at the end of 'docker rm'\n%s", stderr)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -58,3 +58,62 @@ func (s *DockerSuite) TestKillDifferentUserContainer(c *check.C) {
|
||||
c.Fatal("killed container is still running")
|
||||
}
|
||||
}
|
||||
|
||||
// regression test about correct signal parsing see #13665
|
||||
func (s *DockerSuite) TestKillWithSignal(c *check.C) {
|
||||
runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
|
||||
out, _, err := runCommandWithOutput(runCmd)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
cid := strings.TrimSpace(out)
|
||||
c.Assert(waitRun(cid), check.IsNil)
|
||||
|
||||
killCmd := exec.Command(dockerBinary, "kill", "-s", "SIGWINCH", cid)
|
||||
_, err = runCommand(killCmd)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
running, err := inspectField(cid, "State.Running")
|
||||
if running != "true" {
|
||||
c.Fatal("Container should be in running state after SIGWINCH")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestKillWithInvalidSignal(c *check.C) {
|
||||
runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top")
|
||||
out, _, err := runCommandWithOutput(runCmd)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
cid := strings.TrimSpace(out)
|
||||
c.Assert(waitRun(cid), check.IsNil)
|
||||
|
||||
killCmd := exec.Command(dockerBinary, "kill", "-s", "0", cid)
|
||||
out, _, err = runCommandWithOutput(killCmd)
|
||||
c.Assert(err, check.NotNil)
|
||||
if !strings.ContainsAny(out, "Invalid signal: 0") {
|
||||
c.Fatal("Kill with an invalid signal didn't error out correctly")
|
||||
}
|
||||
|
||||
running, err := inspectField(cid, "State.Running")
|
||||
if running != "true" {
|
||||
c.Fatal("Container should be in running state after an invalid signal")
|
||||
}
|
||||
|
||||
runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top")
|
||||
out, _, err = runCommandWithOutput(runCmd)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
cid = strings.TrimSpace(out)
|
||||
c.Assert(waitRun(cid), check.IsNil)
|
||||
|
||||
killCmd = exec.Command(dockerBinary, "kill", "-s", "SIG42", cid)
|
||||
out, _, err = runCommandWithOutput(killCmd)
|
||||
c.Assert(err, check.NotNil)
|
||||
if !strings.ContainsAny(out, "Invalid signal: SIG42") {
|
||||
c.Fatal("Kill with an invalid signal error out correctly")
|
||||
}
|
||||
|
||||
running, err = inspectField(cid, "State.Running")
|
||||
if running != "true" {
|
||||
c.Fatal("Container should be in running state after an invalid signal")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os/exec"
|
||||
"strings"
|
||||
@@ -9,15 +10,14 @@ import (
|
||||
"github.com/go-check/check"
|
||||
)
|
||||
|
||||
func startServerContainer(c *check.C, proto string, port int) string {
|
||||
pStr := fmt.Sprintf("%d:%d", port, port)
|
||||
bCmd := fmt.Sprintf("nc -lp %d && echo bye", port)
|
||||
cmd := []string{"-d", "-p", pStr, "busybox", "sh", "-c", bCmd}
|
||||
if proto == "udp" {
|
||||
cmd = append(cmd, "-u")
|
||||
}
|
||||
|
||||
func startServerContainer(c *check.C, msg string, port int) string {
|
||||
name := "server"
|
||||
cmd := []string{
|
||||
"-d",
|
||||
"-p", fmt.Sprintf("%d:%d", port, port),
|
||||
"busybox",
|
||||
"sh", "-c", fmt.Sprintf("echo %q | nc -lp %d", msg, port),
|
||||
}
|
||||
if err := waitForContainer(name, cmd...); err != nil {
|
||||
c.Fatalf("Failed to launch server container: %v", err)
|
||||
}
|
||||
@@ -60,52 +60,41 @@ func getContainerStatus(c *check.C, containerID string) string {
|
||||
|
||||
func (s *DockerSuite) TestNetworkNat(c *check.C) {
|
||||
testRequires(c, SameHostDaemon, NativeExecDriver)
|
||||
|
||||
srv := startServerContainer(c, "tcp", 8080)
|
||||
|
||||
// Spawn a new container which connects to the server through the
|
||||
// interface address.
|
||||
msg := "it works"
|
||||
startServerContainer(c, msg, 8080)
|
||||
endpoint := getExternalAddress(c)
|
||||
runCmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", fmt.Sprintf("echo hello world | nc -w 30 %s 8080", endpoint))
|
||||
if out, _, err := runCommandWithOutput(runCmd); err != nil {
|
||||
c.Fatalf("Failed to connect to server: %v (output: %q)", err, string(out))
|
||||
conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", endpoint.String(), 8080))
|
||||
if err != nil {
|
||||
c.Fatalf("Failed to connect to container (%v)", err)
|
||||
}
|
||||
|
||||
result := getContainerLogs(c, srv)
|
||||
|
||||
// Ideally we'd like to check for "hello world" but sometimes
|
||||
// nc doesn't show the data it received so instead let's look for
|
||||
// the output of the 'echo bye' that should be printed once
|
||||
// the nc command gets a connection
|
||||
expected := "bye"
|
||||
if !strings.Contains(result, expected) {
|
||||
c.Fatalf("Unexpected output. Expected: %q, received: %q", expected, result)
|
||||
data, err := ioutil.ReadAll(conn)
|
||||
conn.Close()
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
final := strings.TrimRight(string(data), "\n")
|
||||
if final != msg {
|
||||
c.Fatalf("Expected message %q but received %q", msg, final)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestNetworkLocalhostTCPNat(c *check.C) {
|
||||
testRequires(c, SameHostDaemon, NativeExecDriver)
|
||||
|
||||
srv := startServerContainer(c, "tcp", 8081)
|
||||
|
||||
// Attempt to connect from the host to the listening container.
|
||||
var (
|
||||
msg = "hi yall"
|
||||
)
|
||||
startServerContainer(c, msg, 8081)
|
||||
conn, err := net.Dial("tcp", "localhost:8081")
|
||||
if err != nil {
|
||||
c.Fatalf("Failed to connect to container (%v)", err)
|
||||
}
|
||||
if _, err := conn.Write([]byte("hello world\n")); err != nil {
|
||||
data, err := ioutil.ReadAll(conn)
|
||||
conn.Close()
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
conn.Close()
|
||||
|
||||
result := getContainerLogs(c, srv)
|
||||
|
||||
// Ideally we'd like to check for "hello world" but sometimes
|
||||
// nc doesn't show the data it received so instead let's look for
|
||||
// the output of the 'echo bye' that should be printed once
|
||||
// the nc command gets a connection
|
||||
expected := "bye"
|
||||
if !strings.Contains(result, expected) {
|
||||
c.Fatalf("Unexpected output. Expected: %q, received: %q", expected, result)
|
||||
final := strings.TrimRight(string(data), "\n")
|
||||
if final != msg {
|
||||
c.Fatalf("Expected message %q but received %q", msg, final)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -74,6 +74,55 @@ func (s *DockerSuite) TestRmiTag(c *check.C) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestRmiImgIDMultipleTag(c *check.C) {
|
||||
runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-one'")
|
||||
out, _, err := runCommandWithOutput(runCmd)
|
||||
if err != nil {
|
||||
c.Fatalf("failed to create a container:%s, %v", out, err)
|
||||
}
|
||||
containerID := strings.TrimSpace(out)
|
||||
runCmd = exec.Command(dockerBinary, "commit", containerID, "busybox-one")
|
||||
out, _, err = runCommandWithOutput(runCmd)
|
||||
if err != nil {
|
||||
c.Fatalf("failed to commit a new busybox-one:%s, %v", out, err)
|
||||
}
|
||||
|
||||
imagesBefore, _ := dockerCmd(c, "images", "-a")
|
||||
dockerCmd(c, "tag", "busybox-one", "busybox-one:tag1")
|
||||
dockerCmd(c, "tag", "busybox-one", "busybox-one:tag2")
|
||||
|
||||
imagesAfter, _ := dockerCmd(c, "images", "-a")
|
||||
if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+2 {
|
||||
c.Fatalf("tag busybox to create 2 more images with same imageID; docker images shows: %q\n", imagesAfter)
|
||||
}
|
||||
|
||||
imgID, err := inspectField("busybox-one:tag1", "Id")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// run a container with the image
|
||||
out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "busybox-one", "top"))
|
||||
if err != nil {
|
||||
c.Fatalf("failed to create a container:%s, %v", out, err)
|
||||
}
|
||||
containerID = strings.TrimSpace(out)
|
||||
|
||||
// first checkout without force it fails
|
||||
out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "rmi", imgID))
|
||||
expected := fmt.Sprintf("Conflict, cannot delete %s because the running container %s is using it, stop it and use -f to force", imgID[:12], containerID[:12])
|
||||
if err == nil || !strings.Contains(out, expected) {
|
||||
c.Fatalf("rmi tagged in multiple repos should have failed without force: %s, %v, expected: %s", out, err, expected)
|
||||
}
|
||||
|
||||
dockerCmd(c, "stop", containerID)
|
||||
dockerCmd(c, "rmi", "-f", imgID)
|
||||
|
||||
imagesAfter, _ = dockerCmd(c, "images", "-a")
|
||||
if strings.Contains(imagesAfter, imgID[:12]) {
|
||||
c.Fatalf("rmi -f %s failed, image still exists: %q\n\n", imgID, imagesAfter)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestRmiImgIDForce(c *check.C) {
|
||||
runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-test'")
|
||||
out, _, err := runCommandWithOutput(runCmd)
|
||||
@@ -119,7 +168,6 @@ func (s *DockerSuite) TestRmiImgIDForce(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestRmiTagWithExistingContainers(c *check.C) {
|
||||
|
||||
container := "test-delete-tag"
|
||||
newtag := "busybox:newtag"
|
||||
bb := "busybox:latest"
|
||||
|
||||
@@ -3186,3 +3186,26 @@ func (s *DockerSuite) TestRunUnshareProc(c *check.C) {
|
||||
c.Fatalf("unshare should have failed with permission denied, got: %s, %v", out, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestRunPublishPort(c *check.C) {
|
||||
out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "test", "--expose", "8080", "busybox", "top"))
|
||||
c.Assert(err, check.IsNil)
|
||||
out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "port", "test"))
|
||||
c.Assert(err, check.IsNil)
|
||||
out = strings.Trim(out, "\r\n")
|
||||
if out != "" {
|
||||
c.Fatalf("run without --publish-all should not publish port, out should be nil, but got: %s", out)
|
||||
}
|
||||
}
|
||||
|
||||
// Issue #10184.
|
||||
func (s *DockerSuite) TestDevicePermissions(c *check.C) {
|
||||
const permissions = "crw-rw-rw-"
|
||||
out, status := dockerCmd(c, "run", "--device", "/dev/fuse:/dev/fuse:mrw", "busybox:latest", "ls", "-l", "/dev/fuse")
|
||||
if status != 0 {
|
||||
c.Fatalf("expected status 0, got %d", status)
|
||||
}
|
||||
if !strings.HasPrefix(out, permissions) {
|
||||
c.Fatalf("output should begin with %q, got %q", permissions, out)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -29,7 +29,7 @@ func (s *DockerSuite) TestCliStatsNoStream(c *check.C) {
|
||||
if err != nil {
|
||||
c.Fatalf("Error running stats: %v", err)
|
||||
}
|
||||
case <-time.After(2 * time.Second):
|
||||
case <-time.After(3 * time.Second):
|
||||
statsCmd.Process.Kill()
|
||||
c.Fatalf("stats did not return immediately when not streaming")
|
||||
}
|
||||
|
||||
@@ -60,10 +60,10 @@ func SortPortMap(ports []Port, bindings PortMap) {
|
||||
for _, b := range binding {
|
||||
s = append(s, portMapEntry{port: p, binding: b})
|
||||
}
|
||||
bindings[p] = []PortBinding{}
|
||||
} else {
|
||||
s = append(s, portMapEntry{port: p})
|
||||
}
|
||||
bindings[p] = []PortBinding{}
|
||||
}
|
||||
|
||||
sort.Sort(s)
|
||||
@@ -79,7 +79,9 @@ func SortPortMap(ports []Port, bindings PortMap) {
|
||||
i++
|
||||
}
|
||||
// reorder bindings for this port
|
||||
bindings[entry.port] = append(bindings[entry.port], entry.binding)
|
||||
if _, ok := bindings[entry.port]; ok {
|
||||
bindings[entry.port] = append(bindings[entry.port], entry.binding)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
11
opts/opts.go
11
opts/opts.go
@@ -33,7 +33,7 @@ func MapVar(values map[string]string, names []string, usage string) {
|
||||
}
|
||||
|
||||
func LogOptsVar(values map[string]string, names []string, usage string) {
|
||||
flag.Var(newMapOpt(values, ValidateLogOpts), names, usage)
|
||||
flag.Var(newMapOpt(values, nil), names, usage)
|
||||
}
|
||||
|
||||
func HostListVar(values *[]string, names []string, usage string) {
|
||||
@@ -176,15 +176,6 @@ func newMapOpt(values map[string]string, validator ValidatorFctType) *MapOpts {
|
||||
type ValidatorFctType func(val string) (string, error)
|
||||
type ValidatorFctListType func(val string) ([]string, error)
|
||||
|
||||
func ValidateLogOpts(val string) (string, error) {
|
||||
allowedKeys := map[string]string{}
|
||||
vals := strings.Split(val, "=")
|
||||
if allowedKeys[vals[0]] != "" {
|
||||
return val, nil
|
||||
}
|
||||
return "", fmt.Errorf("%s is not a valid log opt", vals[0])
|
||||
}
|
||||
|
||||
func ValidateAttach(val string) (string, error) {
|
||||
s := strings.ToLower(val)
|
||||
for _, str := range []string{"stdin", "stdout", "stderr"} {
|
||||
|
||||
14
pkg/ioutils/fmt.go
Normal file
14
pkg/ioutils/fmt.go
Normal file
@@ -0,0 +1,14 @@
|
||||
package ioutils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// FprintfIfNotEmpty prints the string value if it's not empty
|
||||
func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) {
|
||||
if value != "" {
|
||||
return fmt.Fprintf(w, format, value)
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
17
pkg/ioutils/fmt_test.go
Normal file
17
pkg/ioutils/fmt_test.go
Normal file
@@ -0,0 +1,17 @@
|
||||
package ioutils
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestFprintfIfNotEmpty(t *testing.T) {
|
||||
wc := NewWriteCounter(&NopWriter{})
|
||||
n, _ := FprintfIfNotEmpty(wc, "foo%s", "")
|
||||
|
||||
if wc.Count != 0 || n != 0 {
|
||||
t.Errorf("Wrong count: %v vs. %v vs. 0", wc.Count, n)
|
||||
}
|
||||
|
||||
n, _ = FprintfIfNotEmpty(wc, "foo%s", "bar")
|
||||
if wc.Count != 6 || n != 6 {
|
||||
t.Errorf("Wrong count: %v vs. %v vs. 6", wc.Count, n)
|
||||
}
|
||||
}
|
||||
@@ -289,7 +289,8 @@ type FlagSet struct {
|
||||
// Usage is the function called when an error occurs while parsing flags.
|
||||
// The field is a function (not a method) that may be changed to point to
|
||||
// a custom error handler.
|
||||
Usage func()
|
||||
Usage func()
|
||||
ShortUsage func()
|
||||
|
||||
name string
|
||||
parsed bool
|
||||
@@ -511,6 +512,12 @@ func (f *FlagSet) PrintDefaults() {
|
||||
if runtime.GOOS != "windows" && home == "/" {
|
||||
home = ""
|
||||
}
|
||||
|
||||
// Add a blank line between cmd description and list of options
|
||||
if f.FlagCount() > 0 {
|
||||
fmt.Fprintln(writer, "")
|
||||
}
|
||||
|
||||
f.VisitAll(func(flag *Flag) {
|
||||
format := " -%s=%s"
|
||||
names := []string{}
|
||||
@@ -564,6 +571,12 @@ var Usage = func() {
|
||||
PrintDefaults()
|
||||
}
|
||||
|
||||
// Usage prints to standard error a usage message documenting the standard command layout
|
||||
// The function is a variable that may be changed to point to a custom function.
|
||||
var ShortUsage = func() {
|
||||
fmt.Fprintf(CommandLine.output, "Usage of %s:\n", os.Args[0])
|
||||
}
|
||||
|
||||
// FlagCount returns the number of flags that have been defined.
|
||||
func (f *FlagSet) FlagCount() int { return len(sortFlags(f.formal)) }
|
||||
|
||||
@@ -1067,12 +1080,15 @@ func (cmd *FlagSet) ParseFlags(args []string, withHelp bool) error {
|
||||
return err
|
||||
}
|
||||
if help != nil && *help {
|
||||
cmd.SetOutput(os.Stdout)
|
||||
cmd.Usage()
|
||||
// just in case Usage does not exit
|
||||
os.Exit(0)
|
||||
}
|
||||
if str := cmd.CheckArgs(); str != "" {
|
||||
cmd.SetOutput(os.Stderr)
|
||||
cmd.ReportError(str, withHelp)
|
||||
cmd.ShortUsage()
|
||||
os.Exit(1)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1080,13 +1096,12 @@ func (cmd *FlagSet) ParseFlags(args []string, withHelp bool) error {
|
||||
func (cmd *FlagSet) ReportError(str string, withHelp bool) {
|
||||
if withHelp {
|
||||
if os.Args[0] == cmd.Name() {
|
||||
str += ". See '" + os.Args[0] + " --help'"
|
||||
str += ".\nSee '" + os.Args[0] + " --help'"
|
||||
} else {
|
||||
str += ". See '" + os.Args[0] + " " + cmd.Name() + " --help'"
|
||||
str += ".\nSee '" + os.Args[0] + " " + cmd.Name() + " --help'"
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(cmd.Out(), "docker: %s\n", str)
|
||||
os.Exit(1)
|
||||
fmt.Fprintf(cmd.Out(), "docker: %s.\n", str)
|
||||
}
|
||||
|
||||
// Parsed reports whether f.Parse has been called.
|
||||
|
||||
@@ -68,6 +68,7 @@ func (c *Client) callWithRetry(serviceMethod string, args interface{}, ret inter
|
||||
continue
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
remoteErr, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
|
||||
@@ -1,19 +0,0 @@
|
||||
package urlutil
|
||||
|
||||
import "strings"
|
||||
|
||||
var validUrlPrefixes = []string{
|
||||
"http://",
|
||||
"https://",
|
||||
}
|
||||
|
||||
// IsURL returns true if the provided str is a valid URL by doing
|
||||
// a simple change for the transport of the url.
|
||||
func IsURL(str string) bool {
|
||||
for _, prefix := range validUrlPrefixes {
|
||||
if strings.HasPrefix(str, prefix) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -6,26 +6,25 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
validPrefixes = []string{
|
||||
"git://",
|
||||
"github.com/",
|
||||
"git@",
|
||||
validPrefixes = map[string][]string{
|
||||
"url": {"http://", "https://"},
|
||||
"git": {"git://", "github.com/", "git@"},
|
||||
"transport": {"tcp://", "udp://", "unix://"},
|
||||
}
|
||||
|
||||
urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$")
|
||||
)
|
||||
|
||||
// IsURL returns true if the provided str is an HTTP(S) URL.
|
||||
func IsURL(str string) bool {
|
||||
return checkURL(str, "url")
|
||||
}
|
||||
|
||||
// IsGitURL returns true if the provided str is a git repository URL.
|
||||
func IsGitURL(str string) bool {
|
||||
if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) {
|
||||
return true
|
||||
}
|
||||
for _, prefix := range validPrefixes {
|
||||
if strings.HasPrefix(str, prefix) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
return checkURL(str, "git")
|
||||
}
|
||||
|
||||
// IsGitTransport returns true if the provided str is a git transport by inspecting
|
||||
@@ -33,3 +32,17 @@ func IsGitURL(str string) bool {
|
||||
func IsGitTransport(str string) bool {
|
||||
return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@")
|
||||
}
|
||||
|
||||
// IsTransportURL returns true if the provided str is a transport (tcp, udp, unix) URL.
|
||||
func IsTransportURL(str string) bool {
|
||||
return checkURL(str, "transport")
|
||||
}
|
||||
|
||||
func checkURL(str, kind string) bool {
|
||||
for _, prefix := range validPrefixes[kind] {
|
||||
if strings.HasPrefix(str, prefix) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -5,7 +5,7 @@ the Docker project.
|
||||
|
||||
### CI
|
||||
|
||||
The Docker project uses [Jenkins](https://jenkins.dockerproject.com/) as our
|
||||
The Docker project uses [Jenkins](https://jenkins.dockerproject.org/) as our
|
||||
continuous integration server. Each Pull Request to Docker is tested by running the
|
||||
equivalent of `make all`. We chose Jenkins because we can host it ourselves and
|
||||
we run Docker in Docker to test.
|
||||
|
||||
30
project/make/validate-toml
Normal file
30
project/make/validate-toml
Normal file
@@ -0,0 +1,30 @@
|
||||
#!/bin/bash
|
||||
|
||||
source "$(dirname "$BASH_SOURCE")/.validate"
|
||||
|
||||
IFS=$'\n'
|
||||
files=( $(validate_diff --diff-filter=ACMR --name-only -- 'MAINTAINERS' || true) )
|
||||
unset IFS
|
||||
|
||||
badFiles=()
|
||||
for f in "${files[@]}"; do
|
||||
# we use "git show" here to validate that what's committed is formatted
|
||||
if [ "$(git show "$VALIDATE_HEAD:$f" | tomlv)" ]; then
|
||||
badFiles+=( "$f" )
|
||||
fi
|
||||
done
|
||||
|
||||
if [ ${#badFiles[@]} -eq 0 ]; then
|
||||
echo 'Congratulations! All toml source files have valid syntax.'
|
||||
else
|
||||
{
|
||||
echo "These files are not valid toml:"
|
||||
for f in "${badFiles[@]}"; do
|
||||
echo " - $f"
|
||||
done
|
||||
echo
|
||||
echo 'Please reformat the above files as valid toml'
|
||||
echo
|
||||
} >&2
|
||||
false
|
||||
fi
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
@@ -56,7 +57,10 @@ func init() {
|
||||
dockerUserAgent = useragent.AppendVersions("", httpVersion...)
|
||||
}
|
||||
|
||||
type httpsRequestModifier struct{ tlsConfig *tls.Config }
|
||||
type httpsRequestModifier struct {
|
||||
mu sync.Mutex
|
||||
tlsConfig *tls.Config
|
||||
}
|
||||
|
||||
// DRAGONS(tiborvass): If someone wonders why do we set tlsconfig in a roundtrip,
|
||||
// it's because it's so as to match the current behavior in master: we generate the
|
||||
@@ -125,8 +129,10 @@ func (m *httpsRequestModifier) ModifyRequest(req *http.Request) error {
|
||||
}
|
||||
}
|
||||
}
|
||||
m.mu.Lock()
|
||||
m.tlsConfig.RootCAs = roots
|
||||
m.tlsConfig.Certificates = certs
|
||||
m.mu.Unlock()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -175,7 +181,7 @@ func NewTransport(timeout TimeoutType, secure bool) http.RoundTripper {
|
||||
if secure {
|
||||
// note: httpsTransport also handles http transport
|
||||
// but for HTTPS, it sets up the certs
|
||||
return transport.NewTransport(tr, &httpsRequestModifier{tlsConfig})
|
||||
return transport.NewTransport(tr, &httpsRequestModifier{tlsConfig: tlsConfig})
|
||||
}
|
||||
|
||||
return tr
|
||||
|
||||
@@ -84,7 +84,13 @@ func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) {
|
||||
if req.Header.Get("Authorization") == "" {
|
||||
if req.Header.Get("X-Docker-Token") == "true" && len(tr.Username) > 0 {
|
||||
req.SetBasicAuth(tr.Username, tr.Password)
|
||||
} else if len(tr.token) > 0 {
|
||||
} else if len(tr.token) > 0 &&
|
||||
// Authorization should not be set on 302 redirect for untrusted locations.
|
||||
// This logic mirrors the behavior in AddRequiredHeadersToRedirectedRequests.
|
||||
// As the authorization logic is currently implemented in RoundTrip,
|
||||
// a 302 redirect is detected by looking at the Referer header as go http package adds said header.
|
||||
// This is safe as Docker doesn't set Referer in other scenarios.
|
||||
(req.Header.Get("Referer") == "" || trustedLocation(orig)) {
|
||||
req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ","))
|
||||
}
|
||||
}
|
||||
@@ -98,7 +104,11 @@ func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) {
|
||||
}
|
||||
resp.Body = &transport.OnEOFReader{
|
||||
Rc: resp.Body,
|
||||
Fn: func() { delete(tr.modReq, orig) },
|
||||
Fn: func() {
|
||||
tr.mu.Lock()
|
||||
delete(tr.modReq, orig)
|
||||
tr.mu.Unlock()
|
||||
},
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
@@ -68,10 +68,15 @@ func (r *Session) GetV2Authorization(ep *Endpoint, imageName string, readOnly bo
|
||||
// 1.c) if anything else, err
|
||||
// 2) PUT the created/signed manifest
|
||||
//
|
||||
func (r *Session) GetV2ImageManifest(ep *Endpoint, imageName, tagName string, auth *RequestAuthorization) ([]byte, string, error) {
|
||||
|
||||
// GetV2ImageManifest simply fetches the bytes of a manifest and the remote
|
||||
// digest, if available in the request. Note that the application shouldn't
|
||||
// rely on the untrusted remoteDigest, and should also verify against a
|
||||
// locally provided digest, if applicable.
|
||||
func (r *Session) GetV2ImageManifest(ep *Endpoint, imageName, tagName string, auth *RequestAuthorization) (remoteDigest digest.Digest, p []byte, err error) {
|
||||
routeURL, err := getV2Builder(ep).BuildManifestURL(imageName, tagName)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
method := "GET"
|
||||
@@ -79,31 +84,45 @@ func (r *Session) GetV2ImageManifest(ep *Endpoint, imageName, tagName string, au
|
||||
|
||||
req, err := http.NewRequest(method, routeURL, nil)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
if err := auth.Authorize(req); err != nil {
|
||||
return nil, "", err
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
res, err := r.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
return "", nil, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
if res.StatusCode != 200 {
|
||||
if res.StatusCode == 401 {
|
||||
return nil, "", errLoginRequired
|
||||
return "", nil, errLoginRequired
|
||||
} else if res.StatusCode == 404 {
|
||||
return nil, "", ErrDoesNotExist
|
||||
return "", nil, ErrDoesNotExist
|
||||
}
|
||||
return nil, "", httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s:%s", res.StatusCode, imageName, tagName), res)
|
||||
return "", nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s:%s", res.StatusCode, imageName, tagName), res)
|
||||
}
|
||||
|
||||
manifestBytes, err := ioutil.ReadAll(res.Body)
|
||||
p, err = ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("Error while reading the http response: %s", err)
|
||||
return "", nil, fmt.Errorf("Error while reading the http response: %s", err)
|
||||
}
|
||||
|
||||
return manifestBytes, res.Header.Get(DockerDigestHeader), nil
|
||||
dgstHdr := res.Header.Get(DockerDigestHeader)
|
||||
if dgstHdr != "" {
|
||||
remoteDigest, err = digest.ParseDigest(dgstHdr)
|
||||
if err != nil {
|
||||
// NOTE(stevvooe): Including the remote digest is optional. We
|
||||
// don't need to verify against it, but it is good practice.
|
||||
remoteDigest = ""
|
||||
logrus.Debugf("error parsing remote digest when fetching %v: %v", routeURL, err)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// - Succeeded to head image blob (already exists)
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/symlink"
|
||||
"github.com/docker/docker/pkg/urlutil"
|
||||
)
|
||||
|
||||
@@ -69,7 +70,11 @@ func checkoutGit(fragment, root string) (string, error) {
|
||||
}
|
||||
|
||||
if len(refAndDir) > 1 && len(refAndDir[1]) != 0 {
|
||||
newCtx := filepath.Join(root, refAndDir[1])
|
||||
newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, refAndDir[1]), root)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error setting git context, %q not within git root: %s", refAndDir[1], err)
|
||||
}
|
||||
|
||||
fi, err := os.Stat(newCtx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
||||
@@ -103,6 +103,14 @@ func TestCheckoutGit(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err = os.Symlink("../subdir", filepath.Join(gitDir, "parentlink")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err = os.Symlink("/subdir", filepath.Join(gitDir, "absolutelink")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -147,6 +155,9 @@ func TestCheckoutGit(t *testing.T) {
|
||||
{":Dockerfile", "", true}, // not a directory error
|
||||
{"master:nosubdir", "", true},
|
||||
{"master:subdir", "FROM scratch\nEXPOSE 5000", false},
|
||||
{"master:parentlink", "FROM scratch\nEXPOSE 5000", false},
|
||||
{"master:absolutelink", "FROM scratch\nEXPOSE 5000", false},
|
||||
{"master:../subdir", "", true},
|
||||
{"test", "FROM scratch\nEXPOSE 3000", false},
|
||||
{"test:", "FROM scratch\nEXPOSE 3000", false},
|
||||
{"test:subdir", "FROM busybox\nEXPOSE 5000", false},
|
||||
|
||||
@@ -2,7 +2,6 @@ package digest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
@@ -16,6 +15,7 @@ import (
|
||||
const (
|
||||
// DigestTarSumV1EmptyTar is the digest for the empty tar file.
|
||||
DigestTarSumV1EmptyTar = "tarsum.v1+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
|
||||
|
||||
// DigestSha256EmptyTar is the canonical sha256 digest of empty data
|
||||
DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
|
||||
)
|
||||
@@ -39,7 +39,7 @@ const (
|
||||
type Digest string
|
||||
|
||||
// NewDigest returns a Digest from alg and a hash.Hash object.
|
||||
func NewDigest(alg string, h hash.Hash) Digest {
|
||||
func NewDigest(alg Algorithm, h hash.Hash) Digest {
|
||||
return Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil)))
|
||||
}
|
||||
|
||||
@@ -72,13 +72,13 @@ func ParseDigest(s string) (Digest, error) {
|
||||
|
||||
// FromReader returns the most valid digest for the underlying content.
|
||||
func FromReader(rd io.Reader) (Digest, error) {
|
||||
h := sha256.New()
|
||||
digester := Canonical.New()
|
||||
|
||||
if _, err := io.Copy(h, rd); err != nil {
|
||||
if _, err := io.Copy(digester.Hash(), rd); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return NewDigest("sha256", h), nil
|
||||
return digester.Digest(), nil
|
||||
}
|
||||
|
||||
// FromTarArchive produces a tarsum digest from reader rd.
|
||||
@@ -131,8 +131,8 @@ func (d Digest) Validate() error {
|
||||
return ErrDigestInvalidFormat
|
||||
}
|
||||
|
||||
switch s[:i] {
|
||||
case "sha256", "sha384", "sha512":
|
||||
switch Algorithm(s[:i]) {
|
||||
case SHA256, SHA384, SHA512:
|
||||
break
|
||||
default:
|
||||
return ErrDigestUnsupported
|
||||
@@ -143,8 +143,8 @@ func (d Digest) Validate() error {
|
||||
|
||||
// Algorithm returns the algorithm portion of the digest. This will panic if
|
||||
// the underlying digest is not in a valid format.
|
||||
func (d Digest) Algorithm() string {
|
||||
return string(d[:d.sepIndex()])
|
||||
func (d Digest) Algorithm() Algorithm {
|
||||
return Algorithm(d[:d.sepIndex()])
|
||||
}
|
||||
|
||||
// Hex returns the hex digest portion of the digest. This will panic if the
|
||||
|
||||
@@ -10,7 +10,7 @@ func TestParseDigest(t *testing.T) {
|
||||
for _, testcase := range []struct {
|
||||
input string
|
||||
err error
|
||||
algorithm string
|
||||
algorithm Algorithm
|
||||
hex string
|
||||
}{
|
||||
{
|
||||
|
||||
@@ -1,44 +1,95 @@
|
||||
package digest
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"crypto"
|
||||
"hash"
|
||||
)
|
||||
|
||||
// Digester calculates the digest of written data. It is functionally
|
||||
// equivalent to hash.Hash but provides methods for returning the Digest type
|
||||
// rather than raw bytes.
|
||||
type Digester struct {
|
||||
alg string
|
||||
hash hash.Hash
|
||||
// Algorithm identifies and implementation of a digester by an identifier.
|
||||
// Note the that this defines both the hash algorithm used and the string
|
||||
// encoding.
|
||||
type Algorithm string
|
||||
|
||||
// supported digest types
|
||||
const (
|
||||
SHA256 Algorithm = "sha256" // sha256 with hex encoding
|
||||
SHA384 Algorithm = "sha384" // sha384 with hex encoding
|
||||
SHA512 Algorithm = "sha512" // sha512 with hex encoding
|
||||
TarsumV1SHA256 Algorithm = "tarsum+v1+sha256" // supported tarsum version, verification only
|
||||
|
||||
// Canonical is the primary digest algorithm used with the distribution
|
||||
// project. Other digests may be used but this one is the primary storage
|
||||
// digest.
|
||||
Canonical = SHA256
|
||||
)
|
||||
|
||||
var (
|
||||
// TODO(stevvooe): Follow the pattern of the standard crypto package for
|
||||
// registration of digests. Effectively, we are a registerable set and
|
||||
// common symbol access.
|
||||
|
||||
// algorithms maps values to hash.Hash implementations. Other algorithms
|
||||
// may be available but they cannot be calculated by the digest package.
|
||||
algorithms = map[Algorithm]crypto.Hash{
|
||||
SHA256: crypto.SHA256,
|
||||
SHA384: crypto.SHA384,
|
||||
SHA512: crypto.SHA512,
|
||||
}
|
||||
)
|
||||
|
||||
// Available returns true if the digest type is available for use. If this
|
||||
// returns false, New and Hash will return nil.
|
||||
func (a Algorithm) Available() bool {
|
||||
h, ok := algorithms[a]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// check availability of the hash, as well
|
||||
return h.Available()
|
||||
}
|
||||
|
||||
// NewDigester create a new Digester with the given hashing algorithm and instance
|
||||
// of that algo's hasher.
|
||||
func NewDigester(alg string, h hash.Hash) Digester {
|
||||
return Digester{
|
||||
alg: alg,
|
||||
hash: h,
|
||||
// New returns a new digester for the specified algorithm. If the algorithm
|
||||
// does not have a digester implementation, nil will be returned. This can be
|
||||
// checked by calling Available before calling New.
|
||||
func (a Algorithm) New() Digester {
|
||||
return &digester{
|
||||
alg: a,
|
||||
hash: a.Hash(),
|
||||
}
|
||||
}
|
||||
|
||||
// NewCanonicalDigester is a convenience function to create a new Digester with
|
||||
// out default settings.
|
||||
func NewCanonicalDigester() Digester {
|
||||
return NewDigester("sha256", sha256.New())
|
||||
// Hash returns a new hash as used by the algorithm. If not available, nil is
|
||||
// returned. Make sure to check Available before calling.
|
||||
func (a Algorithm) Hash() hash.Hash {
|
||||
if !a.Available() {
|
||||
return nil
|
||||
}
|
||||
|
||||
return algorithms[a].New()
|
||||
}
|
||||
|
||||
// Write data to the digester. These writes cannot fail.
|
||||
func (d *Digester) Write(p []byte) (n int, err error) {
|
||||
return d.hash.Write(p)
|
||||
// TODO(stevvooe): Allow resolution of verifiers using the digest type and
|
||||
// this registration system.
|
||||
|
||||
// Digester calculates the digest of written data. Writes should go directly
|
||||
// to the return value of Hash, while calling Digest will return the current
|
||||
// value of the digest.
|
||||
type Digester interface {
|
||||
Hash() hash.Hash // provides direct access to underlying hash instance.
|
||||
Digest() Digest
|
||||
}
|
||||
|
||||
// Digest returns the current digest for this digester.
|
||||
func (d *Digester) Digest() Digest {
|
||||
// digester provides a simple digester definition that embeds a hasher.
|
||||
type digester struct {
|
||||
alg Algorithm
|
||||
hash hash.Hash
|
||||
}
|
||||
|
||||
func (d *digester) Hash() hash.Hash {
|
||||
return d.hash
|
||||
}
|
||||
|
||||
func (d *digester) Digest() Digest {
|
||||
return NewDigest(d.alg, d.hash)
|
||||
}
|
||||
|
||||
// Reset the state of the digester.
|
||||
func (d *Digester) Reset() {
|
||||
d.hash.Reset()
|
||||
}
|
||||
|
||||
21
vendor/src/github.com/docker/distribution/digest/digester_resumable_test.go
vendored
Normal file
21
vendor/src/github.com/docker/distribution/digest/digester_resumable_test.go
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
// +build !noresumabledigest
|
||||
|
||||
package digest
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stevvooe/resumable"
|
||||
_ "github.com/stevvooe/resumable/sha256"
|
||||
)
|
||||
|
||||
// TestResumableDetection just ensures that the resumable capability of a hash
|
||||
// is exposed through the digester type, which is just a hash plus a Digest
|
||||
// method.
|
||||
func TestResumableDetection(t *testing.T) {
|
||||
d := Canonical.New()
|
||||
|
||||
if _, ok := d.Hash().(resumable.Hash); !ok {
|
||||
t.Fatalf("expected digester to implement resumable.Hash: %#v, %v", d, d.Hash())
|
||||
}
|
||||
}
|
||||
195
vendor/src/github.com/docker/distribution/digest/set.go
vendored
Normal file
195
vendor/src/github.com/docker/distribution/digest/set.go
vendored
Normal file
@@ -0,0 +1,195 @@
|
||||
package digest
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrDigestNotFound is used when a matching digest
|
||||
// could not be found in a set.
|
||||
ErrDigestNotFound = errors.New("digest not found")
|
||||
|
||||
// ErrDigestAmbiguous is used when multiple digests
|
||||
// are found in a set. None of the matching digests
|
||||
// should be considered valid matches.
|
||||
ErrDigestAmbiguous = errors.New("ambiguous digest string")
|
||||
)
|
||||
|
||||
// Set is used to hold a unique set of digests which
|
||||
// may be easily referenced by easily referenced by a string
|
||||
// representation of the digest as well as short representation.
|
||||
// The uniqueness of the short representation is based on other
|
||||
// digests in the set. If digests are ommited from this set,
|
||||
// collisions in a larger set may not be detected, therefore it
|
||||
// is important to always do short representation lookups on
|
||||
// the complete set of digests. To mitigate collisions, an
|
||||
// appropriately long short code should be used.
|
||||
type Set struct {
|
||||
entries digestEntries
|
||||
}
|
||||
|
||||
// NewSet creates an empty set of digests
|
||||
// which may have digests added.
|
||||
func NewSet() *Set {
|
||||
return &Set{
|
||||
entries: digestEntries{},
|
||||
}
|
||||
}
|
||||
|
||||
// checkShortMatch checks whether two digests match as either whole
|
||||
// values or short values. This function does not test equality,
|
||||
// rather whether the second value could match against the first
|
||||
// value.
|
||||
func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool {
|
||||
if len(hex) == len(shortHex) {
|
||||
if hex != shortHex {
|
||||
return false
|
||||
}
|
||||
if len(shortAlg) > 0 && string(alg) != shortAlg {
|
||||
return false
|
||||
}
|
||||
} else if !strings.HasPrefix(hex, shortHex) {
|
||||
return false
|
||||
} else if len(shortAlg) > 0 && string(alg) != shortAlg {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Lookup looks for a digest matching the given string representation.
|
||||
// If no digests could be found ErrDigestNotFound will be returned
|
||||
// with an empty digest value. If multiple matches are found
|
||||
// ErrDigestAmbiguous will be returned with an empty digest value.
|
||||
func (dst *Set) Lookup(d string) (Digest, error) {
|
||||
if len(dst.entries) == 0 {
|
||||
return "", ErrDigestNotFound
|
||||
}
|
||||
var (
|
||||
searchFunc func(int) bool
|
||||
alg Algorithm
|
||||
hex string
|
||||
)
|
||||
dgst, err := ParseDigest(d)
|
||||
if err == ErrDigestInvalidFormat {
|
||||
hex = d
|
||||
searchFunc = func(i int) bool {
|
||||
return dst.entries[i].val >= d
|
||||
}
|
||||
} else {
|
||||
hex = dgst.Hex()
|
||||
alg = dgst.Algorithm()
|
||||
searchFunc = func(i int) bool {
|
||||
if dst.entries[i].val == hex {
|
||||
return dst.entries[i].alg >= alg
|
||||
}
|
||||
return dst.entries[i].val >= hex
|
||||
}
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
|
||||
return "", ErrDigestNotFound
|
||||
}
|
||||
if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
|
||||
return dst.entries[idx].digest, nil
|
||||
}
|
||||
if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
|
||||
return "", ErrDigestAmbiguous
|
||||
}
|
||||
|
||||
return dst.entries[idx].digest, nil
|
||||
}
|
||||
|
||||
// Add adds the given digests to the set. An error will be returned
|
||||
// if the given digest is invalid. If the digest already exists in the
|
||||
// table, this operation will be a no-op.
|
||||
func (dst *Set) Add(d Digest) error {
|
||||
if err := d.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
|
||||
searchFunc := func(i int) bool {
|
||||
if dst.entries[i].val == entry.val {
|
||||
return dst.entries[i].alg >= entry.alg
|
||||
}
|
||||
return dst.entries[i].val >= entry.val
|
||||
}
|
||||
idx := sort.Search(len(dst.entries), searchFunc)
|
||||
if idx == len(dst.entries) {
|
||||
dst.entries = append(dst.entries, entry)
|
||||
return nil
|
||||
} else if dst.entries[idx].digest == d {
|
||||
return nil
|
||||
}
|
||||
|
||||
entries := append(dst.entries, nil)
|
||||
copy(entries[idx+1:], entries[idx:len(entries)-1])
|
||||
entries[idx] = entry
|
||||
dst.entries = entries
|
||||
return nil
|
||||
}
|
||||
|
||||
// ShortCodeTable returns a map of Digest to unique short codes. The
|
||||
// length represents the minimum value, the maximum length may be the
|
||||
// entire value of digest if uniqueness cannot be achieved without the
|
||||
// full value. This function will attempt to make short codes as short
|
||||
// as possible to be unique.
|
||||
func ShortCodeTable(dst *Set, length int) map[Digest]string {
|
||||
m := make(map[Digest]string, len(dst.entries))
|
||||
l := length
|
||||
resetIdx := 0
|
||||
for i := 0; i < len(dst.entries); i++ {
|
||||
var short string
|
||||
extended := true
|
||||
for extended {
|
||||
extended = false
|
||||
if len(dst.entries[i].val) <= l {
|
||||
short = dst.entries[i].digest.String()
|
||||
} else {
|
||||
short = dst.entries[i].val[:l]
|
||||
for j := i + 1; j < len(dst.entries); j++ {
|
||||
if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
|
||||
if j > resetIdx {
|
||||
resetIdx = j
|
||||
}
|
||||
extended = true
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
if extended {
|
||||
l++
|
||||
}
|
||||
}
|
||||
}
|
||||
m[dst.entries[i].digest] = short
|
||||
if i >= resetIdx {
|
||||
l = length
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
type digestEntry struct {
|
||||
alg Algorithm
|
||||
val string
|
||||
digest Digest
|
||||
}
|
||||
|
||||
type digestEntries []*digestEntry
|
||||
|
||||
func (d digestEntries) Len() int {
|
||||
return len(d)
|
||||
}
|
||||
|
||||
func (d digestEntries) Less(i, j int) bool {
|
||||
if d[i].val != d[j].val {
|
||||
return d[i].val < d[j].val
|
||||
}
|
||||
return d[i].alg < d[j].alg
|
||||
}
|
||||
|
||||
func (d digestEntries) Swap(i, j int) {
|
||||
d[i], d[j] = d[j], d[i]
|
||||
}
|
||||
272
vendor/src/github.com/docker/distribution/digest/set_test.go
vendored
Normal file
272
vendor/src/github.com/docker/distribution/digest/set_test.go
vendored
Normal file
@@ -0,0 +1,272 @@
|
||||
package digest
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"math/rand"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func assertEqualDigests(t *testing.T, d1, d2 Digest) {
|
||||
if d1 != d2 {
|
||||
t.Fatalf("Digests do not match:\n\tActual: %s\n\tExpected: %s", d1, d2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLookup(t *testing.T) {
|
||||
digests := []Digest{
|
||||
"sha256:12345",
|
||||
"sha256:1234",
|
||||
"sha256:12346",
|
||||
"sha256:54321",
|
||||
"sha256:65431",
|
||||
"sha256:64321",
|
||||
"sha256:65421",
|
||||
"sha256:65321",
|
||||
}
|
||||
|
||||
dset := NewSet()
|
||||
for i := range digests {
|
||||
if err := dset.Add(digests[i]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
dgst, err := dset.Lookup("54")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assertEqualDigests(t, dgst, digests[3])
|
||||
|
||||
dgst, err = dset.Lookup("1234")
|
||||
if err == nil {
|
||||
t.Fatal("Expected ambiguous error looking up: 1234")
|
||||
}
|
||||
if err != ErrDigestAmbiguous {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dgst, err = dset.Lookup("9876")
|
||||
if err == nil {
|
||||
t.Fatal("Expected ambiguous error looking up: 9876")
|
||||
}
|
||||
if err != ErrDigestNotFound {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dgst, err = dset.Lookup("sha256:1234")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assertEqualDigests(t, dgst, digests[1])
|
||||
|
||||
dgst, err = dset.Lookup("sha256:12345")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assertEqualDigests(t, dgst, digests[0])
|
||||
|
||||
dgst, err = dset.Lookup("sha256:12346")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assertEqualDigests(t, dgst, digests[2])
|
||||
|
||||
dgst, err = dset.Lookup("12346")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assertEqualDigests(t, dgst, digests[2])
|
||||
|
||||
dgst, err = dset.Lookup("12345")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assertEqualDigests(t, dgst, digests[0])
|
||||
}
|
||||
|
||||
func TestAddDuplication(t *testing.T) {
|
||||
digests := []Digest{
|
||||
"sha256:1234",
|
||||
"sha256:12345",
|
||||
"sha256:12346",
|
||||
"sha256:54321",
|
||||
"sha256:65431",
|
||||
"sha512:65431",
|
||||
"sha512:65421",
|
||||
"sha512:65321",
|
||||
}
|
||||
|
||||
dset := NewSet()
|
||||
for i := range digests {
|
||||
if err := dset.Add(digests[i]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(dset.entries) != 8 {
|
||||
t.Fatal("Invalid dset size")
|
||||
}
|
||||
|
||||
if err := dset.Add(Digest("sha256:12345")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(dset.entries) != 8 {
|
||||
t.Fatal("Duplicate digest insert allowed")
|
||||
}
|
||||
|
||||
if err := dset.Add(Digest("sha384:12345")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(dset.entries) != 9 {
|
||||
t.Fatal("Insert with different algorithm not allowed")
|
||||
}
|
||||
}
|
||||
|
||||
// assertEqualShort fails the test when actual differs from expected.
func assertEqualShort(t *testing.T, actual, expected string) {
	if actual == expected {
		return
	}
	t.Fatalf("Unexpected short value:\n\tExpected: %s\n\tActual: %s", expected, actual)
}
|
||||
|
||||
func TestShortCodeTable(t *testing.T) {
|
||||
digests := []Digest{
|
||||
"sha256:1234",
|
||||
"sha256:12345",
|
||||
"sha256:12346",
|
||||
"sha256:54321",
|
||||
"sha256:65431",
|
||||
"sha256:64321",
|
||||
"sha256:65421",
|
||||
"sha256:65321",
|
||||
}
|
||||
|
||||
dset := NewSet()
|
||||
for i := range digests {
|
||||
if err := dset.Add(digests[i]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
dump := ShortCodeTable(dset, 2)
|
||||
|
||||
if len(dump) < len(digests) {
|
||||
t.Fatalf("Error unexpected size: %d, expecting %d", len(dump), len(digests))
|
||||
}
|
||||
|
||||
assertEqualShort(t, dump[digests[0]], "sha256:1234")
|
||||
assertEqualShort(t, dump[digests[1]], "sha256:12345")
|
||||
assertEqualShort(t, dump[digests[2]], "sha256:12346")
|
||||
assertEqualShort(t, dump[digests[3]], "54")
|
||||
assertEqualShort(t, dump[digests[4]], "6543")
|
||||
assertEqualShort(t, dump[digests[5]], "64")
|
||||
assertEqualShort(t, dump[digests[6]], "6542")
|
||||
assertEqualShort(t, dump[digests[7]], "653")
|
||||
}
|
||||
|
||||
func createDigests(count int) ([]Digest, error) {
|
||||
r := rand.New(rand.NewSource(25823))
|
||||
digests := make([]Digest, count)
|
||||
for i := range digests {
|
||||
h := sha256.New()
|
||||
if err := binary.Write(h, binary.BigEndian, r.Int63()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
digests[i] = NewDigest("sha256", h)
|
||||
}
|
||||
return digests, nil
|
||||
}
|
||||
|
||||
func benchAddNTable(b *testing.B, n int) {
|
||||
digests, err := createDigests(n)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))}
|
||||
for j := range digests {
|
||||
if err = dset.Add(digests[j]); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func benchLookupNTable(b *testing.B, n int, shortLen int) {
|
||||
digests, err := createDigests(n)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))}
|
||||
for i := range digests {
|
||||
if err := dset.Add(digests[i]); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
shorts := make([]string, 0, n)
|
||||
for _, short := range ShortCodeTable(dset, shortLen) {
|
||||
shorts = append(shorts, short)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if _, err = dset.Lookup(shorts[i%n]); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func benchShortCodeNTable(b *testing.B, n int, shortLen int) {
|
||||
digests, err := createDigests(n)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))}
|
||||
for i := range digests {
|
||||
if err := dset.Add(digests[i]); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
ShortCodeTable(dset, shortLen)
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkAdd10 measures Set.Add over 10 pre-generated digests.
func BenchmarkAdd10(b *testing.B) {
	benchAddNTable(b, 10)
}

// BenchmarkAdd100 measures Set.Add over 100 pre-generated digests.
func BenchmarkAdd100(b *testing.B) {
	benchAddNTable(b, 100)
}

// BenchmarkAdd1000 measures Set.Add over 1000 pre-generated digests.
func BenchmarkAdd1000(b *testing.B) {
	benchAddNTable(b, 1000)
}
|
||||
|
||||
// BenchmarkLookup10 measures Lookup on a 10-digest set with 12-char codes.
func BenchmarkLookup10(b *testing.B) {
	benchLookupNTable(b, 10, 12)
}

// BenchmarkLookup100 measures Lookup on a 100-digest set with 12-char codes.
func BenchmarkLookup100(b *testing.B) {
	benchLookupNTable(b, 100, 12)
}

// BenchmarkLookup1000 measures Lookup on a 1000-digest set with 12-char codes.
func BenchmarkLookup1000(b *testing.B) {
	benchLookupNTable(b, 1000, 12)
}
|
||||
|
||||
// BenchmarkShortCode10 measures ShortCodeTable on a 10-digest set.
func BenchmarkShortCode10(b *testing.B) {
	benchShortCodeNTable(b, 10, 12)
}

// BenchmarkShortCode100 measures ShortCodeTable on a 100-digest set.
func BenchmarkShortCode100(b *testing.B) {
	benchShortCodeNTable(b, 100, 12)
}

// BenchmarkShortCode1000 measures ShortCodeTable on a 1000-digest set.
func BenchmarkShortCode1000(b *testing.B) {
	benchShortCodeNTable(b, 1000, 12)
}
|
||||
@@ -6,10 +6,10 @@ import (
|
||||
"regexp"
|
||||
)
|
||||
|
||||
// TarsumRegexp defines a regular expression to match tarsum identifiers.
// NOTE(review): the '.' before the optional version component is
// unescaped, so it matches any character, not only a literal dot —
// confirm whether that is intentional before tightening it.
var TarsumRegexp = regexp.MustCompile("tarsum(?:.[a-z0-9]+)?\\+[a-zA-Z0-9]+:[A-Fa-f0-9]+")

// TarsumRegexpCapturing defines a regular expression to match tarsum identifiers with
// capture groups corresponding to each component.
var TarsumRegexpCapturing = regexp.MustCompile("(tarsum)(.([a-z0-9]+))?\\+([a-zA-Z0-9]+):([A-Fa-f0-9]+)")
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user