Mirror of https://github.com/moby/moby.git (synced 2026-01-11 18:51:37 +00:00)

Compare commits: 214 commits in the range 00166d05d9...v18.09.2
Commit list: only abbreviated commit SHA1s survived in this capture (the author, date, and message columns are empty); the 214 commits run from eb137ff176 through 1d531ff64f. The file diffs for the range follow.
@@ -24,10 +24,10 @@
 # the case. Therefore, you don't have to disable it anymore.
 #
 
-FROM golang:1.10.3 AS base
+FROM golang:1.10.6 AS base
 # FIXME(vdemeester) this is kept for other script depending on it to not fail right away
 # Remove this once the other scripts uses something else to detect the version
-ENV GO_VERSION 1.10.3
+ENV GO_VERSION 1.10.6
 # allow replacing httpredir or deb mirror
 ARG APT_MIRROR=deb.debian.org
 RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
@@ -154,6 +154,7 @@ RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME
 FROM runtime-dev AS runc
 ENV INSTALL_BINARY_NAME=runc
 COPY hack/dockerfile/install/install.sh ./install.sh
+COPY git-bundles /go/src/github.com/docker/docker/git-bundles
 COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
 RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME
 
@@ -1,5 +1,5 @@
 ## Step 1: Build tests
-FROM golang:1.10.3-alpine3.7 as builder
+FROM golang:1.10.6-alpine3.7 as builder
 
 RUN apk add --update \
     bash \
@@ -42,7 +42,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
 # will need updating, to avoid errors. Ping #docker-maintainers on IRC
 # with a heads-up.
 # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
-ENV GO_VERSION 1.10.3
+ENV GO_VERSION 1.10.6
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \
     | tar -xzC /usr/local
 ENV PATH /go/bin:/usr/local/go/bin:$PATH
@@ -161,7 +161,7 @@ SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPref
 # Environment variable notes:
 # - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
 # - FROM_DOCKERFILE is used for detection of building within a container.
-ENV GO_VERSION=1.10.3 `
+ENV GO_VERSION=1.10.6 `
     GIT_VERSION=2.11.1 `
     GOPATH=C:\go `
     FROM_DOCKERFILE=1
@@ -88,7 +88,7 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string
 }
 
 // PruneCache removes all cached build sources
-func (b *Backend) PruneCache(ctx context.Context) (*types.BuildCachePruneReport, error) {
+func (b *Backend) PruneCache(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) {
    eg, ctx := errgroup.WithContext(ctx)
 
    var fsCacheSize uint64
@@ -102,9 +102,10 @@ func (b *Backend) PruneCache(ctx context.Context) (*types.BuildCachePruneReport,
    })
 
    var buildCacheSize int64
+   var cacheIDs []string
    eg.Go(func() error {
        var err error
-       buildCacheSize, err = b.buildkit.Prune(ctx)
+       buildCacheSize, cacheIDs, err = b.buildkit.Prune(ctx, opts)
        if err != nil {
            return errors.Wrap(err, "failed to prune build cache")
        }
@@ -115,7 +116,7 @@ func (b *Backend) PruneCache(ctx context.Context) (*types.BuildCachePruneReport,
        return nil, err
    }
 
-   return &types.BuildCachePruneReport{SpaceReclaimed: fsCacheSize + uint64(buildCacheSize)}, nil
+   return &types.BuildCachePruneReport{SpaceReclaimed: fsCacheSize + uint64(buildCacheSize), CachesDeleted: cacheIDs}, nil
 }
 
 // Cancel cancels the build by ID
@@ -14,7 +14,7 @@ type Backend interface {
    Build(context.Context, backend.BuildConfig) (string, error)
 
    // Prune build cache
-   PruneCache(context.Context) (*types.BuildCachePruneReport, error)
+   PruneCache(context.Context, types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error)
 
    Cancel(context.Context, string) error
 }
@@ -7,15 +7,19 @@ import (
 
 // buildRouter is a router to talk with the build controller
 type buildRouter struct {
-   backend        Backend
-   daemon         experimentalProvider
-   routes         []router.Route
-   builderVersion types.BuilderVersion
+   backend  Backend
+   daemon   experimentalProvider
+   routes   []router.Route
+   features *map[string]bool
 }
 
 // NewRouter initializes a new build router
-func NewRouter(b Backend, d experimentalProvider, bv types.BuilderVersion) router.Router {
-   r := &buildRouter{backend: b, daemon: d, builderVersion: bv}
+func NewRouter(b Backend, d experimentalProvider, features *map[string]bool) router.Router {
+   r := &buildRouter{
+       backend:  b,
+       daemon:   d,
+       features: features,
+   }
    r.initRoutes()
    return r
 }
@@ -32,3 +36,18 @@ func (r *buildRouter) initRoutes() {
        router.NewPostRoute("/build/cancel", r.postCancel),
    }
 }
+
+// BuilderVersion derives the default docker builder version from the config
+// Note: it is valid to have BuilderVersion unset which means it is up to the
+// client to choose which builder to use.
+func BuilderVersion(features map[string]bool) types.BuilderVersion {
+   var bv types.BuilderVersion
+   if v, ok := features["buildkit"]; ok {
+       if v {
+           bv = types.BuilderBuildKit
+       } else {
+           bv = types.BuilderV1
+       }
+   }
+   return bv
+}
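To illustrate the new helper: a minimal, self-contained sketch of how a feature map selects the Builder-Version value reported by the daemon. The map literals are made-up examples; the "1"/"2" strings correspond to types.BuilderV1 and types.BuilderBuildKit, and the real map comes from the daemon's configured features.

package main

import "fmt"

// builderVersion mirrors the BuilderVersion helper added above, but returns a
// plain string so the sketch compiles without the docker/api/types package.
func builderVersion(features map[string]bool) string {
	if v, ok := features["buildkit"]; ok {
		if v {
			return "2" // types.BuilderBuildKit
		}
		return "1" // types.BuilderV1
	}
	return "" // unset: the client decides which builder to use
}

func main() {
	fmt.Println(builderVersion(map[string]bool{"buildkit": true}))  // 2
	fmt.Println(builderVersion(map[string]bool{"buildkit": false})) // 1
	fmt.Println(builderVersion(map[string]bool{}))                  // "" (unset)
}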
@@ -18,6 +18,7 @@ import (
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/backend"
    "github.com/docker/docker/api/types/container"
+   "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/api/types/versions"
    "github.com/docker/docker/errdefs"
    "github.com/docker/docker/pkg/ioutils"
@@ -161,7 +162,29 @@ func parseVersion(s string) (types.BuilderVersion, error) {
 }
 
 func (br *buildRouter) postPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-   report, err := br.backend.PruneCache(ctx)
+   if err := httputils.ParseForm(r); err != nil {
+       return err
+   }
+   filters, err := filters.FromJSON(r.Form.Get("filters"))
+   if err != nil {
+       return errors.Wrap(err, "could not parse filters")
+   }
+   ksfv := r.FormValue("keep-storage")
+   if ksfv == "" {
+       ksfv = "0"
+   }
+   ks, err := strconv.Atoi(ksfv)
+   if err != nil {
+       return errors.Wrapf(err, "keep-storage is in bytes and expects an integer, got %v", ksfv)
+   }
+
+   opts := types.BuildCachePruneOptions{
+       All:         httputils.BoolValue(r, "all"),
+       Filters:     filters,
+       KeepStorage: int64(ks),
+   }
+
+   report, err := br.backend.PruneCache(ctx, opts)
    if err != nil {
        return err
    }
@@ -230,11 +253,6 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
        return errdefs.InvalidParameter(errors.New("squash is only supported with experimental mode"))
    }
 
-   // check if the builder feature has been enabled from daemon as well.
-   if buildOptions.Version == types.BuilderBuildKit && br.builderVersion != "" && br.builderVersion != types.BuilderBuildKit {
-       return errdefs.InvalidParameter(errors.New("buildkit is not enabled on daemon"))
-   }
-
    out := io.Writer(output)
    if buildOptions.SuppressOutput {
        out = notVerboseBuffer
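For orientation, a hedged client-side sketch of the request this handler now accepts. It assumes a daemon exposed over plain TCP at localhost:2375 (a default install listens on a unix socket instead) and uses API version 1.39 in the path; the filter value and the sizes are arbitrary examples, not values from this changeset.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// The "filters" query parameter is a JSON map of filter name -> values,
	// matching what filters.FromJSON expects on the daemon side.
	f, _ := json.Marshal(map[string][]string{"until": {"24h"}})

	q := url.Values{}
	q.Set("all", "1")                   // prune all build cache, not only unreferenced records
	q.Set("keep-storage", "2000000000") // keep roughly 2GB of cache; the value is in bytes
	q.Set("filters", string(f))

	// Assumption: the daemon is reachable at tcp://localhost:2375 without TLS.
	resp, err := http.Post("http://localhost:2375/v1.39/build/prune?"+q.Encode(), "application/json", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var report struct {
		CachesDeleted  []string
		SpaceReclaimed uint64
	}
	if err := json.NewDecoder(resp.Body).Decode(&report); err != nil {
		panic(err)
	}
	fmt.Printf("deleted %d cache records, reclaimed %d bytes\n", len(report.CachesDeleted), report.SpaceReclaimed)
}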
@@ -6,12 +6,14 @@ import (
    "context"
    "encoding/base64"
    "encoding/json"
    "errors"
    "io"
    "net/http"

    "github.com/docker/docker/api/server/httputils"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/versions"
    "github.com/docker/docker/errdefs"
    gddohttputil "github.com/golang/gddo/httputil"
 )
 
@@ -37,7 +39,10 @@ func (s *containerRouter) postContainersCopy(ctx context.Context, w http.Respons
 
    cfg := types.CopyConfig{}
    if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil {
-       return err
+       if err == io.EOF {
+           return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
+       }
+       return errdefs.InvalidParameter(err)
    }
 
    if cfg.Resource == "" {
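The same decode change is repeated across the container, exec, network, plugin, and swarm routers below. The following standalone sketch only illustrates the shared pattern; the helper does not exist in the codebase, and it substitutes plain errors for the errdefs.InvalidParameter wrapper the real handlers use. The idea is to map io.EOF (an empty request body) and any other decode failure to an invalid-parameter style error.

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"strings"
)

// decodeBody decodes a JSON request body and reports failures as
// invalid-parameter errors, with an explicit message when the body is empty.
func decodeBody(r io.Reader, v interface{}) error {
	if err := json.NewDecoder(r).Decode(v); err != nil {
		if err == io.EOF {
			return errors.New("invalid parameter: got EOF while reading request body")
		}
		return fmt.Errorf("invalid parameter: %v", err)
	}
	return nil
}

func main() {
	var cfg struct{ Resource string }
	fmt.Println(decodeBody(strings.NewReader(""), &cfg))                         // empty body -> EOF message
	fmt.Println(decodeBody(strings.NewReader(`{"Resource":"/etc"}`), &cfg), cfg) // <nil> {/etc}
}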
@@ -3,6 +3,7 @@ package container // import "github.com/docker/docker/api/server/router/containe
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
@@ -44,7 +45,10 @@ func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.Re
|
||||
|
||||
execConfig := &types.ExecConfig{}
|
||||
if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil {
|
||||
return err
|
||||
if err == io.EOF {
|
||||
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||
}
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
|
||||
if len(execConfig.Cmd) == 0 {
|
||||
@@ -84,7 +88,10 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
|
||||
|
||||
execStartCheck := &types.ExecStartCheck{}
|
||||
if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil {
|
||||
return err
|
||||
if err == io.EOF {
|
||||
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||
}
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
|
||||
if exists, err := s.backend.ExecExists(execName); !exists {
|
||||
|
||||
@@ -3,6 +3,7 @@ package network // import "github.com/docker/docker/api/server/router/network"
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -215,7 +216,10 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(r.Body).Decode(&create); err != nil {
|
||||
return err
|
||||
if err == io.EOF {
|
||||
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||
}
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
|
||||
if nws, err := n.cluster.GetNetworksByName(create.Name); err == nil && len(nws) > 0 {
|
||||
@@ -261,7 +265,10 @@ func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseW
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(r.Body).Decode(&connect); err != nil {
|
||||
return err
|
||||
if err == io.EOF {
|
||||
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||
}
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
|
||||
// Unlike other operations, we does not check ambiguity of the name/ID here.
|
||||
@@ -282,7 +289,10 @@ func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.Respon
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(r.Body).Decode(&disconnect); err != nil {
|
||||
return err
|
||||
if err == io.EOF {
|
||||
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||
}
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
|
||||
return n.backend.DisconnectContainerFromNetwork(disconnect.Container, vars["id"], disconnect.Force)
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -12,6 +13,7 @@ import (
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/pkg/errors"
|
||||
@@ -276,7 +278,10 @@ func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r
|
||||
func (pr *pluginRouter) setPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
var args []string
|
||||
if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
|
||||
return err
|
||||
if err == io.EOF {
|
||||
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||
}
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
if err := pr.backend.Set(vars["name"], args); err != nil {
|
||||
return err
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
@@ -21,7 +22,16 @@ import (
|
||||
func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
var req types.InitRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
return err
|
||||
if err == io.EOF {
|
||||
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||
}
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
// DefaultAddrPool and SubnetSize were added in API 1.39. Ignore on older API versions.
|
||||
if versions.LessThan(version, "1.39") {
|
||||
req.DefaultAddrPool = nil
|
||||
req.SubnetSize = 0
|
||||
}
|
||||
nodeID, err := sr.backend.Init(req)
|
||||
if err != nil {
|
||||
@@ -34,7 +44,10 @@ func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r
|
||||
func (sr *swarmRouter) joinCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
var req types.JoinRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
return err
|
||||
if err == io.EOF {
|
||||
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||
}
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
return sr.backend.Join(req)
|
||||
}
|
||||
@@ -61,7 +74,10 @@ func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter
|
||||
func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
var swarm types.Spec
|
||||
if err := json.NewDecoder(r.Body).Decode(&swarm); err != nil {
|
||||
return err
|
||||
if err == io.EOF {
|
||||
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||
}
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
|
||||
rawVersion := r.URL.Query().Get("version")
|
||||
@@ -112,7 +128,10 @@ func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter,
|
||||
func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
var req types.UnlockRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
return err
|
||||
if err == io.EOF {
|
||||
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||
}
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
|
||||
if err := sr.backend.UnlockSwarm(req); err != nil {
|
||||
@@ -175,7 +194,10 @@ func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r
|
||||
func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
var service types.ServiceSpec
|
||||
if err := json.NewDecoder(r.Body).Decode(&service); err != nil {
|
||||
return err
|
||||
if err == io.EOF {
|
||||
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||
}
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
|
||||
// Get returns "" if the header does not exist
|
||||
@@ -198,7 +220,10 @@ func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter,
|
||||
func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
var service types.ServiceSpec
|
||||
if err := json.NewDecoder(r.Body).Decode(&service); err != nil {
|
||||
return err
|
||||
if err == io.EOF {
|
||||
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||
}
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
|
||||
rawVersion := r.URL.Query().Get("version")
|
||||
@@ -291,7 +316,10 @@ func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *ht
|
||||
func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
var node types.NodeSpec
|
||||
if err := json.NewDecoder(r.Body).Decode(&node); err != nil {
|
||||
return err
|
||||
if err == io.EOF {
|
||||
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||
}
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
|
||||
rawVersion := r.URL.Query().Get("version")
|
||||
@@ -370,7 +398,10 @@ func (sr *swarmRouter) getSecrets(ctx context.Context, w http.ResponseWriter, r
|
||||
func (sr *swarmRouter) createSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
var secret types.SecretSpec
|
||||
if err := json.NewDecoder(r.Body).Decode(&secret); err != nil {
|
||||
return err
|
||||
if err == io.EOF {
|
||||
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||
}
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
if secret.Templating != nil && versions.LessThan(version, "1.37") {
|
||||
@@ -408,6 +439,9 @@ func (sr *swarmRouter) getSecret(ctx context.Context, w http.ResponseWriter, r *
|
||||
func (sr *swarmRouter) updateSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
var secret types.SecretSpec
|
||||
if err := json.NewDecoder(r.Body).Decode(&secret); err != nil {
|
||||
if err == io.EOF {
|
||||
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||
}
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
|
||||
@@ -441,7 +475,10 @@ func (sr *swarmRouter) getConfigs(ctx context.Context, w http.ResponseWriter, r
|
||||
func (sr *swarmRouter) createConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
var config types.ConfigSpec
|
||||
if err := json.NewDecoder(r.Body).Decode(&config); err != nil {
|
||||
return err
|
||||
if err == io.EOF {
|
||||
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||
}
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
@@ -480,6 +517,9 @@ func (sr *swarmRouter) getConfig(ctx context.Context, w http.ResponseWriter, r *
|
||||
func (sr *swarmRouter) updateConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
var config types.ConfigSpec
|
||||
if err := json.NewDecoder(r.Body).Decode(&config); err != nil {
|
||||
if err == io.EOF {
|
||||
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||
}
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
|
||||
|
||||
@@ -2,30 +2,29 @@ package system // import "github.com/docker/docker/api/server/router/system"
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/api/server/router"
|
||||
"github.com/docker/docker/api/types"
|
||||
buildkit "github.com/docker/docker/builder/builder-next"
|
||||
"github.com/docker/docker/builder/builder-next"
|
||||
"github.com/docker/docker/builder/fscache"
|
||||
)
|
||||
|
||||
// systemRouter provides information about the Docker system overall.
|
||||
// It gathers information about host, daemon and container events.
|
||||
type systemRouter struct {
|
||||
backend Backend
|
||||
cluster ClusterBackend
|
||||
routes []router.Route
|
||||
fscache *fscache.FSCache // legacy
|
||||
builder *buildkit.Builder
|
||||
builderVersion types.BuilderVersion
|
||||
backend Backend
|
||||
cluster ClusterBackend
|
||||
routes []router.Route
|
||||
fscache *fscache.FSCache // legacy
|
||||
builder *buildkit.Builder
|
||||
features *map[string]bool
|
||||
}
|
||||
|
||||
// NewRouter initializes a new system router
|
||||
func NewRouter(b Backend, c ClusterBackend, fscache *fscache.FSCache, builder *buildkit.Builder, bv types.BuilderVersion) router.Router {
|
||||
func NewRouter(b Backend, c ClusterBackend, fscache *fscache.FSCache, builder *buildkit.Builder, features *map[string]bool) router.Router {
|
||||
r := &systemRouter{
|
||||
backend: b,
|
||||
cluster: c,
|
||||
fscache: fscache,
|
||||
builder: builder,
|
||||
builderVersion: bv,
|
||||
backend: b,
|
||||
cluster: c,
|
||||
fscache: fscache,
|
||||
builder: builder,
|
||||
features: features,
|
||||
}
|
||||
|
||||
r.routes = []router.Route{
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/server/httputils"
|
||||
"github.com/docker/docker/api/server/router/build"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/events"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
@@ -26,7 +27,8 @@ func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request,
|
||||
}
|
||||
|
||||
func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||
if bv := s.builderVersion; bv != "" {
|
||||
builderVersion := build.BuilderVersion(*s.features)
|
||||
if bv := builderVersion; bv != "" {
|
||||
w.Header().Set("Builder-Version", string(bv))
|
||||
}
|
||||
_, err := w.Write([]byte{'O', 'K'})
|
||||
|
||||
@@ -56,7 +56,7 @@ func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWri
|
||||
if err == io.EOF {
|
||||
return errdefs.InvalidParameter(errors.New("got EOF while reading request body"))
|
||||
}
|
||||
return err
|
||||
return errdefs.InvalidParameter(err)
|
||||
}
|
||||
|
||||
volume, err := v.backend.Create(ctx, req.Name, req.Driver, opts.WithCreateOptions(req.DriverOpts), opts.WithCreateLabels(req.Labels))
|
||||
|
||||
@@ -1473,11 +1473,9 @@ definitions:
|
||||
type: "string"
|
||||
Options:
|
||||
description: "Driver-specific options, specified as a map."
|
||||
type: "array"
|
||||
items:
|
||||
type: "object"
|
||||
additionalProperties:
|
||||
type: "string"
|
||||
type: "object"
|
||||
additionalProperties:
|
||||
type: "string"
|
||||
|
||||
NetworkContainer:
|
||||
type: "object"
|
||||
@@ -1513,6 +1511,31 @@ definitions:
|
||||
aux:
|
||||
$ref: "#/definitions/ImageID"
|
||||
|
||||
BuildCache:
|
||||
type: "object"
|
||||
properties:
|
||||
ID:
|
||||
type: "string"
|
||||
Parent:
|
||||
type: "string"
|
||||
Type:
|
||||
type: "string"
|
||||
Description:
|
||||
type: "string"
|
||||
InUse:
|
||||
type: "boolean"
|
||||
Shared:
|
||||
type: "boolean"
|
||||
Size:
|
||||
type: "integer"
|
||||
CreatedAt:
|
||||
type: "integer"
|
||||
LastUsedAt:
|
||||
type: "integer"
|
||||
x-nullable: true
|
||||
UsageCount:
|
||||
type: "integer"
|
||||
|
||||
ImageID:
|
||||
type: "object"
|
||||
description: "Image ID or Digest"
|
||||
@@ -2434,6 +2457,22 @@ definitions:
|
||||
description: "Whether there is currently a root CA rotation in progress for the swarm"
|
||||
type: "boolean"
|
||||
example: false
|
||||
DefaultAddrPool:
|
||||
description: |
|
||||
Default Address Pool specifies default subnet pools for global scope networks.
|
||||
type: "array"
|
||||
items:
|
||||
type: "string"
|
||||
format: "CIDR"
|
||||
example: ["10.10.0.0/16", "20.20.0.0/16"]
|
||||
SubnetSize:
|
||||
description: |
|
||||
SubnetSize specifies the subnet size of the networks created from the default subnet pool
|
||||
type: "integer"
|
||||
format: "uint32"
|
||||
maximum: 29
|
||||
default: 24
|
||||
example: 24
|
||||
|
||||
JoinTokens:
|
||||
description: |
|
||||
@@ -3722,18 +3761,22 @@ definitions:
|
||||
description: |
|
||||
HTTP-proxy configured for the daemon. This value is obtained from the
|
||||
[`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.
|
||||
Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL
|
||||
are masked in the API response.
|
||||
|
||||
Containers do not automatically inherit this configuration.
|
||||
type: "string"
|
||||
example: "http://user:pass@proxy.corp.example.com:8080"
|
||||
example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080"
|
||||
HttpsProxy:
|
||||
description: |
|
||||
HTTPS-proxy configured for the daemon. This value is obtained from the
|
||||
[`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.
|
||||
Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL
|
||||
are masked in the API response.
|
||||
|
||||
Containers do not automatically inherit this configuration.
|
||||
type: "string"
|
||||
example: "https://user:pass@proxy.corp.example.com:4443"
|
||||
example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443"
|
||||
NoProxy:
|
||||
description: |
|
||||
Comma-separated list of domain extensions for which no proxy should be
|
||||
@@ -3823,10 +3866,10 @@ definitions:
|
||||
$ref: "#/definitions/Runtime"
|
||||
default:
|
||||
runc:
|
||||
path: "docker-runc"
|
||||
path: "runc"
|
||||
example:
|
||||
runc:
|
||||
path: "docker-runc"
|
||||
path: "runc"
|
||||
runc-master:
|
||||
path: "/go/bin/runc"
|
||||
custom:
|
||||
@@ -6358,6 +6401,29 @@ paths:
|
||||
produces:
|
||||
- "application/json"
|
||||
operationId: "BuildPrune"
|
||||
parameters:
|
||||
- name: "keep-storage"
|
||||
in: "query"
|
||||
description: "Amount of disk space in bytes to keep for cache"
|
||||
type: "integer"
|
||||
format: "int64"
|
||||
- name: "all"
|
||||
in: "query"
|
||||
type: "boolean"
|
||||
description: "Remove all types of build cache"
|
||||
- name: "filters"
|
||||
in: "query"
|
||||
type: "string"
|
||||
description: |
|
||||
A JSON encoded value of the filters (a `map[string][]string`) to process on the list of build cache objects. Available filters:
|
||||
- `until=<duration>`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h')
|
||||
- `id=<id>`
|
||||
- `parent=<id>`
|
||||
- `type=<string>`
|
||||
- `description=<string>`
|
||||
- `inuse`
|
||||
- `shared`
|
||||
- `private`
|
||||
responses:
|
||||
200:
|
||||
description: "No error"
|
||||
@@ -6365,6 +6431,11 @@ paths:
|
||||
type: "object"
|
||||
title: "BuildPruneResponse"
|
||||
properties:
|
||||
CachesDeleted:
|
||||
type: "array"
|
||||
items:
|
||||
description: "ID of build cache object"
|
||||
type: "string"
|
||||
SpaceReclaimed:
|
||||
description: "Disk space reclaimed in bytes"
|
||||
type: "integer"
|
||||
@@ -7199,6 +7270,10 @@ paths:
|
||||
type: "array"
|
||||
items:
|
||||
$ref: "#/definitions/Volume"
|
||||
BuildCache:
|
||||
type: "array"
|
||||
items:
|
||||
$ref: "#/definitions/BuildCache"
|
||||
example:
|
||||
LayersSize: 1092588
|
||||
Images:
|
||||
@@ -9284,7 +9359,10 @@ paths:
|
||||
|
||||
- name: "version"
|
||||
in: "query"
|
||||
description: "The version number of the service object being updated. This is required to avoid conflicting writes."
|
||||
description: "The version number of the service object being updated.
|
||||
This is required to avoid conflicting writes.
|
||||
This version number should be the value as currently set on the service *before* the update.
|
||||
You can find the current version by calling `GET /services/{id}`"
|
||||
required: true
|
||||
type: "integer"
|
||||
- name: "registryAuthFrom"
|
||||
|
||||
@@ -543,6 +543,7 @@ type ImagesPruneReport struct {
 // BuildCachePruneReport contains the response for Engine API:
 // POST "/build/prune"
 type BuildCachePruneReport struct {
+   CachesDeleted  []string
    SpaceReclaimed uint64
 }
 
@@ -592,14 +593,21 @@ type BuildResult struct {
 
 // BuildCache contains information about a build cache record
 type BuildCache struct {
-   ID      string
-   Mutable bool
-   InUse   bool
-   Size    int64
-
+   ID          string
+   Parent      string
+   Type        string
+   Description string
+   InUse       bool
+   Shared      bool
+   Size        int64
    CreatedAt   time.Time
    LastUsedAt  *time.Time
    UsageCount  int
-   Parent      string
-   Description string
 }
+
+// BuildCachePruneOptions hold parameters to prune the build cache
+type BuildCachePruneOptions struct {
+   All         bool
+   KeepStorage int64
+   Filters     filters.Args
+}
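A hedged usage sketch for the new options struct. It assumes the Go client from the same release cycle, whose BuildCachePrune method was updated alongside this API change to accept the options; the filter and the keep-storage size are arbitrary examples.

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}

	opts := types.BuildCachePruneOptions{
		All:         false,
		KeepStorage: 512 * 1024 * 1024, // keep roughly 512MB of cache (bytes)
		Filters:     filters.NewArgs(filters.Arg("until", "24h")),
	}

	report, err := cli.BuildCachePrune(context.Background(), opts)
	if err != nil {
		panic(err)
	}
	fmt.Printf("reclaimed %d bytes, removed %d cache records\n", report.SpaceReclaimed, len(report.CachesDeleted))
}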
@@ -34,6 +34,7 @@ import (
|
||||
"github.com/moby/buildkit/util/flightcontrol"
|
||||
"github.com/moby/buildkit/util/imageutil"
|
||||
"github.com/moby/buildkit/util/progress"
|
||||
"github.com/moby/buildkit/util/resolver"
|
||||
"github.com/moby/buildkit/util/tracing"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/identity"
|
||||
@@ -51,6 +52,7 @@ type SourceOpt struct {
|
||||
DownloadManager distribution.RootFSDownloadManager
|
||||
MetadataStore metadata.V2MetadataService
|
||||
ImageStore image.Store
|
||||
ResolverOpt resolver.ResolveOptionsFunc
|
||||
}
|
||||
|
||||
type imageSource struct {
|
||||
@@ -71,17 +73,25 @@ func (is *imageSource) ID() string {
|
||||
return source.DockerImageScheme
|
||||
}
|
||||
|
||||
func (is *imageSource) getResolver(ctx context.Context) remotes.Resolver {
|
||||
return docker.NewResolver(docker.ResolverOptions{
|
||||
Client: tracing.DefaultClient,
|
||||
Credentials: is.getCredentialsFromSession(ctx),
|
||||
})
|
||||
func (is *imageSource) getResolver(ctx context.Context, rfn resolver.ResolveOptionsFunc, ref string) remotes.Resolver {
|
||||
opt := docker.ResolverOptions{
|
||||
Client: tracing.DefaultClient,
|
||||
}
|
||||
if rfn != nil {
|
||||
opt = rfn(ref)
|
||||
}
|
||||
opt.Credentials = is.getCredentialsFromSession(ctx)
|
||||
r := docker.NewResolver(opt)
|
||||
return r
|
||||
}
|
||||
|
||||
func (is *imageSource) getCredentialsFromSession(ctx context.Context) func(string) (string, string, error) {
|
||||
id := session.FromContext(ctx)
|
||||
if id == "" {
|
||||
return nil
|
||||
// can be removed after containerd/containerd#2812
|
||||
return func(string) (string, string, error) {
|
||||
return "", "", nil
|
||||
}
|
||||
}
|
||||
return func(host string) (string, string, error) {
|
||||
timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
@@ -118,7 +128,7 @@ func (is *imageSource) resolveRemote(ctx context.Context, ref string, platform *
|
||||
dt []byte
|
||||
}
|
||||
res, err := is.g.Do(ctx, ref, func(ctx context.Context) (interface{}, error) {
|
||||
dgst, dt, err := imageutil.Config(ctx, ref, is.getResolver(ctx), is.ContentStore, platform)
|
||||
dgst, dt, err := imageutil.Config(ctx, ref, is.getResolver(ctx, is.ResolverOpt, ref), is.ContentStore, platform)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -181,7 +191,7 @@ func (is *imageSource) Resolve(ctx context.Context, id source.Identifier) (sourc
|
||||
p := &puller{
|
||||
src: imageIdentifier,
|
||||
is: is,
|
||||
resolver: is.getResolver(ctx),
|
||||
resolver: is.getResolver(ctx, is.ResolverOpt, imageIdentifier.Reference.String()),
|
||||
platform: platform,
|
||||
}
|
||||
return p, nil
|
||||
@@ -516,6 +526,15 @@ func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO: handle windows layers for cross platform builds
|
||||
|
||||
if p.src.RecordType != "" && cache.GetRecordType(ref) == "" {
|
||||
if err := cache.SetRecordType(ref, p.src.RecordType); err != nil {
|
||||
ref.Release(context.TODO())
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return ref, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -5,10 +5,10 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/docker/docker/layer"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/pkg/errors"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/containerd/containerd/mount"
|
||||
"github.com/containerd/containerd/snapshots"
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
@@ -16,6 +15,7 @@ import (
|
||||
"github.com/moby/buildkit/snapshot"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
var keyParent = []byte("parent")
|
||||
@@ -110,6 +110,10 @@ func (s *snapshotter) chainID(key string) (layer.ChainID, bool) {
|
||||
return "", false
|
||||
}
|
||||
|
||||
func (s *snapshotter) GetLayer(key string) (layer.Layer, error) {
|
||||
return s.getLayer(key, true)
|
||||
}
|
||||
|
||||
func (s *snapshotter) getLayer(key string, withCommitted bool) (layer.Layer, error) {
|
||||
s.mu.Lock()
|
||||
l, ok := s.refs[key]
|
||||
@@ -422,10 +426,11 @@ func (s *snapshotter) Close() error {
|
||||
}
|
||||
|
||||
type mountable struct {
|
||||
mu sync.Mutex
|
||||
mounts []mount.Mount
|
||||
acquire func() ([]mount.Mount, error)
|
||||
release func() error
|
||||
mu sync.Mutex
|
||||
mounts []mount.Mount
|
||||
acquire func() ([]mount.Mount, error)
|
||||
release func() error
|
||||
refCount int
|
||||
}
|
||||
|
||||
func (m *mountable) Mount() ([]mount.Mount, error) {
|
||||
@@ -433,6 +438,7 @@ func (m *mountable) Mount() ([]mount.Mount, error) {
|
||||
defer m.mu.Unlock()
|
||||
|
||||
if m.mounts != nil {
|
||||
m.refCount++
|
||||
return m.mounts, nil
|
||||
}
|
||||
|
||||
@@ -441,6 +447,7 @@ func (m *mountable) Mount() ([]mount.Mount, error) {
|
||||
return nil, err
|
||||
}
|
||||
m.mounts = mounts
|
||||
m.refCount = 1
|
||||
|
||||
return m.mounts, nil
|
||||
}
|
||||
@@ -448,6 +455,13 @@ func (m *mountable) Mount() ([]mount.Mount, error) {
|
||||
func (m *mountable) Release() error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
if m.refCount > 1 {
|
||||
m.refCount--
|
||||
return nil
|
||||
}
|
||||
|
||||
m.refCount = 0
|
||||
if m.release == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package buildkit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
@@ -13,32 +14,67 @@ import (
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/daemon/images"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/docker/libnetwork"
|
||||
controlapi "github.com/moby/buildkit/api/services/control"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/control"
|
||||
"github.com/moby/buildkit/identity"
|
||||
"github.com/moby/buildkit/session"
|
||||
"github.com/moby/buildkit/solver/llbsolver"
|
||||
"github.com/moby/buildkit/util/entitlements"
|
||||
"github.com/moby/buildkit/util/resolver"
|
||||
"github.com/moby/buildkit/util/tracing"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sync/errgroup"
|
||||
grpcmetadata "google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
type errMultipleFilterValues struct{}
|
||||
|
||||
func (errMultipleFilterValues) Error() string { return "filters expect only one value" }
|
||||
|
||||
func (errMultipleFilterValues) InvalidParameter() {}
|
||||
|
||||
type errConflictFilter struct {
|
||||
a, b string
|
||||
}
|
||||
|
||||
func (e errConflictFilter) Error() string {
|
||||
return fmt.Sprintf("conflicting filters: %q and %q", e.a, e.b)
|
||||
}
|
||||
|
||||
func (errConflictFilter) InvalidParameter() {}
|
||||
|
||||
var cacheFields = map[string]bool{
|
||||
"id": true,
|
||||
"parent": true,
|
||||
"type": true,
|
||||
"description": true,
|
||||
"inuse": true,
|
||||
"shared": true,
|
||||
"private": true,
|
||||
// fields from buildkit that are not exposed
|
||||
"mutable": false,
|
||||
"immutable": false,
|
||||
}
|
||||
|
||||
func init() {
|
||||
llbsolver.AllowNetworkHostUnstable = true
|
||||
}
|
||||
|
||||
// Opt is option struct required for creating the builder
|
||||
type Opt struct {
|
||||
SessionManager *session.Manager
|
||||
Root string
|
||||
Dist images.DistributionServices
|
||||
NetworkController libnetwork.NetworkController
|
||||
SessionManager *session.Manager
|
||||
Root string
|
||||
Dist images.DistributionServices
|
||||
NetworkController libnetwork.NetworkController
|
||||
DefaultCgroupParent string
|
||||
ResolverOpt resolver.ResolveOptionsFunc
|
||||
BuilderConfig config.BuilderConfig
|
||||
}
|
||||
|
||||
// Builder can build using BuildKit backend
|
||||
@@ -86,48 +122,72 @@ func (b *Builder) DiskUsage(ctx context.Context) ([]*types.BuildCache, error) {
|
||||
var items []*types.BuildCache
|
||||
for _, r := range duResp.Record {
|
||||
items = append(items, &types.BuildCache{
|
||||
ID: r.ID,
|
||||
Mutable: r.Mutable,
|
||||
InUse: r.InUse,
|
||||
Size: r.Size_,
|
||||
|
||||
ID: r.ID,
|
||||
Parent: r.Parent,
|
||||
Type: r.RecordType,
|
||||
Description: r.Description,
|
||||
InUse: r.InUse,
|
||||
Shared: r.Shared,
|
||||
Size: r.Size_,
|
||||
CreatedAt: r.CreatedAt,
|
||||
LastUsedAt: r.LastUsedAt,
|
||||
UsageCount: int(r.UsageCount),
|
||||
Parent: r.Parent,
|
||||
Description: r.Description,
|
||||
})
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
// Prune clears all reclaimable build cache
|
||||
func (b *Builder) Prune(ctx context.Context) (int64, error) {
|
||||
func (b *Builder) Prune(ctx context.Context, opts types.BuildCachePruneOptions) (int64, []string, error) {
|
||||
ch := make(chan *controlapi.UsageRecord)
|
||||
|
||||
eg, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
validFilters := make(map[string]bool, 1+len(cacheFields))
|
||||
validFilters["unused-for"] = true
|
||||
validFilters["until"] = true
|
||||
validFilters["label"] = true // TODO(tiborvass): handle label
|
||||
validFilters["label!"] = true // TODO(tiborvass): handle label!
|
||||
for k, v := range cacheFields {
|
||||
validFilters[k] = v
|
||||
}
|
||||
if err := opts.Filters.Validate(validFilters); err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
pi, err := toBuildkitPruneInfo(opts)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
eg.Go(func() error {
|
||||
defer close(ch)
|
||||
return b.controller.Prune(&controlapi.PruneRequest{}, &pruneProxy{
|
||||
return b.controller.Prune(&controlapi.PruneRequest{
|
||||
All: pi.All,
|
||||
KeepDuration: int64(pi.KeepDuration),
|
||||
KeepBytes: pi.KeepBytes,
|
||||
Filter: pi.Filter,
|
||||
}, &pruneProxy{
|
||||
streamProxy: streamProxy{ctx: ctx},
|
||||
ch: ch,
|
||||
})
|
||||
})
|
||||
|
||||
var size int64
|
||||
var cacheIDs []string
|
||||
eg.Go(func() error {
|
||||
for r := range ch {
|
||||
size += r.Size_
|
||||
cacheIDs = append(cacheIDs, r.ID)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
return 0, err
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
return size, nil
|
||||
return size, cacheIDs, nil
|
||||
}
|
||||
|
||||
// Build executes a build request
|
||||
@@ -179,7 +239,9 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
|
||||
|
||||
id := identity.NewID()
|
||||
|
||||
frontendAttrs := map[string]string{}
|
||||
frontendAttrs := map[string]string{
|
||||
"override-copy-image": "docker.io/docker/dockerfile-copy:v0.1.9@sha256:e8f159d3f00786604b93c675ee2783f8dc194bb565e61ca5788f6a6e9d304061",
|
||||
}
|
||||
|
||||
if opt.Options.Target != "" {
|
||||
frontendAttrs["target"] = opt.Options.Target
|
||||
@@ -460,6 +522,7 @@ func toBuildkitExtraHosts(inp []string) (string, error) {
|
||||
hosts := make([]string, 0, len(inp))
|
||||
for _, h := range inp {
|
||||
parts := strings.Split(h, ":")
|
||||
|
||||
if len(parts) != 2 || parts[0] == "" || net.ParseIP(parts[1]) == nil {
|
||||
return "", errors.Errorf("invalid host %s", h)
|
||||
}
|
||||
@@ -467,3 +530,56 @@ func toBuildkitExtraHosts(inp []string) (string, error) {
|
||||
}
|
||||
return strings.Join(hosts, ","), nil
|
||||
}
|
||||
|
||||
func toBuildkitPruneInfo(opts types.BuildCachePruneOptions) (client.PruneInfo, error) {
|
||||
var until time.Duration
|
||||
untilValues := opts.Filters.Get("until") // canonical
|
||||
unusedForValues := opts.Filters.Get("unused-for") // deprecated synonym for "until" filter
|
||||
|
||||
if len(untilValues) > 0 && len(unusedForValues) > 0 {
|
||||
return client.PruneInfo{}, errConflictFilter{"until", "unused-for"}
|
||||
}
|
||||
filterKey := "until"
|
||||
if len(unusedForValues) > 0 {
|
||||
filterKey = "unused-for"
|
||||
}
|
||||
untilValues = append(untilValues, unusedForValues...)
|
||||
|
||||
switch len(untilValues) {
|
||||
case 0:
|
||||
// nothing to do
|
||||
case 1:
|
||||
var err error
|
||||
until, err = time.ParseDuration(untilValues[0])
|
||||
if err != nil {
|
||||
return client.PruneInfo{}, errors.Wrapf(err, "%q filter expects a duration (e.g., '24h')", filterKey)
|
||||
}
|
||||
default:
|
||||
return client.PruneInfo{}, errMultipleFilterValues{}
|
||||
}
|
||||
|
||||
bkFilter := make([]string, 0, opts.Filters.Len())
|
||||
for cacheField := range cacheFields {
|
||||
if opts.Filters.Include(cacheField) {
|
||||
values := opts.Filters.Get(cacheField)
|
||||
switch len(values) {
|
||||
case 0:
|
||||
bkFilter = append(bkFilter, cacheField)
|
||||
case 1:
|
||||
if cacheField == "id" {
|
||||
bkFilter = append(bkFilter, cacheField+"~="+values[0])
|
||||
} else {
|
||||
bkFilter = append(bkFilter, cacheField+"=="+values[0])
|
||||
}
|
||||
default:
|
||||
return client.PruneInfo{}, errMultipleFilterValues{}
|
||||
}
|
||||
}
|
||||
}
|
||||
return client.PruneInfo{
|
||||
All: opts.All,
|
||||
KeepDuration: until,
|
||||
KeepBytes: opts.KeepStorage,
|
||||
Filter: []string{strings.Join(bkFilter, ",")},
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -6,14 +6,19 @@ import (
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containerd/containerd/content/local"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/builder/builder-next/adapters/containerimage"
|
||||
"github.com/docker/docker/builder/builder-next/adapters/snapshot"
|
||||
containerimageexp "github.com/docker/docker/builder/builder-next/exporter"
|
||||
"github.com/docker/docker/builder/builder-next/imagerefchecker"
|
||||
mobyworker "github.com/docker/docker/builder/builder-next/worker"
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
units "github.com/docker/go-units"
|
||||
"github.com/moby/buildkit/cache"
|
||||
"github.com/moby/buildkit/cache/metadata"
|
||||
registryremotecache "github.com/moby/buildkit/cache/remotecache/registry"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/control"
|
||||
"github.com/moby/buildkit/exporter"
|
||||
"github.com/moby/buildkit/frontend"
|
||||
@@ -21,7 +26,7 @@ import (
|
||||
"github.com/moby/buildkit/frontend/gateway"
|
||||
"github.com/moby/buildkit/frontend/gateway/forwarder"
|
||||
"github.com/moby/buildkit/snapshot/blobmapping"
|
||||
"github.com/moby/buildkit/solver/boltdbcachestorage"
|
||||
"github.com/moby/buildkit/solver/bboltcachestorage"
|
||||
"github.com/moby/buildkit/worker"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -69,9 +74,20 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
|
||||
MetadataStore: md,
|
||||
})
|
||||
|
||||
layerGetter, ok := sbase.(imagerefchecker.LayerGetter)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("snapshotter does not implement layergetter")
|
||||
}
|
||||
|
||||
refChecker := imagerefchecker.New(imagerefchecker.Opt{
|
||||
ImageStore: dist.ImageStore,
|
||||
LayerGetter: layerGetter,
|
||||
})
|
||||
|
||||
cm, err := cache.NewManager(cache.ManagerOpt{
|
||||
Snapshotter: snapshotter,
|
||||
MetadataStore: md,
|
||||
Snapshotter: snapshotter,
|
||||
MetadataStore: md,
|
||||
PruneRefChecker: refChecker,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -85,12 +101,13 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
|
||||
MetadataStore: dist.V2MetadataService,
|
||||
ImageStore: dist.ImageStore,
|
||||
ReferenceStore: dist.ReferenceStore,
|
||||
ResolverOpt: opt.ResolverOpt,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
exec, err := newExecutor(root, opt.NetworkController)
|
||||
exec, err := newExecutor(root, opt.DefaultCgroupParent, opt.NetworkController)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -109,17 +126,23 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cacheStorage, err := boltdbcachestorage.NewStore(filepath.Join(opt.Root, "cache.db"))
|
||||
cacheStorage, err := bboltcachestorage.NewStore(filepath.Join(opt.Root, "cache.db"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
gcPolicy, err := getGCPolicy(opt.BuilderConfig, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get builder GC policy")
|
||||
}
|
||||
|
||||
wopt := mobyworker.Opt{
|
||||
ID: "moby",
|
||||
SessionManager: opt.SessionManager,
|
||||
MetadataStore: md,
|
||||
ContentStore: store,
|
||||
CacheManager: cm,
|
||||
GCPolicy: gcPolicy,
|
||||
Snapshotter: snapshotter,
|
||||
Executor: exec,
|
||||
ImageSource: src,
|
||||
@@ -148,7 +171,48 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
|
||||
WorkerController: wc,
|
||||
Frontends: frontends,
|
||||
CacheKeyStorage: cacheStorage,
|
||||
ResolveCacheImporterFunc: registryremotecache.ResolveCacheImporterFunc(opt.SessionManager),
|
||||
ResolveCacheImporterFunc: registryremotecache.ResolveCacheImporterFunc(opt.SessionManager, opt.ResolverOpt),
|
||||
// TODO: set ResolveCacheExporterFunc for exporting cache
|
||||
})
|
||||
}
|
||||
|
||||
func getGCPolicy(conf config.BuilderConfig, root string) ([]client.PruneInfo, error) {
|
||||
var gcPolicy []client.PruneInfo
|
||||
if conf.GC.Enabled {
|
||||
var (
|
||||
defaultKeepStorage int64
|
||||
err error
|
||||
)
|
||||
|
||||
if conf.GC.DefaultKeepStorage != "" {
|
||||
defaultKeepStorage, err = units.RAMInBytes(conf.GC.DefaultKeepStorage)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not parse '%s' as Builder.GC.DefaultKeepStorage config", conf.GC.DefaultKeepStorage)
|
||||
}
|
||||
}
|
||||
|
||||
if conf.GC.Policy == nil {
|
||||
gcPolicy = mobyworker.DefaultGCPolicy(root, defaultKeepStorage)
|
||||
} else {
|
||||
gcPolicy = make([]client.PruneInfo, len(conf.GC.Policy))
|
||||
for i, p := range conf.GC.Policy {
|
||||
b, err := units.RAMInBytes(p.KeepStorage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b == 0 {
|
||||
b = defaultKeepStorage
|
||||
}
|
||||
gcPolicy[i], err = toBuildkitPruneInfo(types.BuildCachePruneOptions{
|
||||
All: p.All,
|
||||
KeepStorage: b,
|
||||
Filters: p.Filter,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return gcPolicy, nil
|
||||
}
|
||||
|
||||
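To make the size handling concrete: a short sketch of how a DefaultKeepStorage-style string is converted to bytes with the go-units package used above. The example values are arbitrary; units.RAMInBytes parses 1024-based multiples.

package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	for _, s := range []string{"512MB", "20GB"} {
		n, err := units.RAMInBytes(s) // e.g. "20GB" -> 20 * 1024^3 bytes
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %d bytes\n", s, n)
	}
}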
@@ -3,41 +3,47 @@
|
||||
package buildkit
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/docker/libnetwork"
|
||||
"github.com/moby/buildkit/executor"
|
||||
"github.com/moby/buildkit/executor/runcexecutor"
|
||||
"github.com/moby/buildkit/identity"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/moby/buildkit/util/network"
|
||||
"github.com/pkg/errors"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const networkName = "bridge"
|
||||
|
||||
func newExecutor(root string, net libnetwork.NetworkController) (executor.Executor, error) {
|
||||
// FIXME: fix bridge networking
|
||||
_ = bridgeProvider{}
|
||||
func newExecutor(root, cgroupParent string, net libnetwork.NetworkController) (executor.Executor, error) {
|
||||
networkProviders := map[pb.NetMode]network.Provider{
|
||||
pb.NetMode_UNSET: &bridgeProvider{NetworkController: net},
|
||||
pb.NetMode_HOST: network.NewHostProvider(),
|
||||
pb.NetMode_NONE: network.NewNoneProvider(),
|
||||
}
|
||||
return runcexecutor.New(runcexecutor.Opt{
|
||||
Root: filepath.Join(root, "executor"),
|
||||
CommandCandidates: []string{"docker-runc", "runc"},
|
||||
}, nil)
|
||||
Root: filepath.Join(root, "executor"),
|
||||
CommandCandidates: []string{"runc"},
|
||||
DefaultCgroupParent: cgroupParent,
|
||||
}, networkProviders)
|
||||
}
|
||||
|
||||
type bridgeProvider struct {
|
||||
libnetwork.NetworkController
|
||||
}
|
||||
|
||||
func (p *bridgeProvider) NewInterface() (network.Interface, error) {
|
||||
func (p *bridgeProvider) New() (network.Namespace, error) {
|
||||
n, err := p.NetworkByName(networkName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
iface := &lnInterface{ready: make(chan struct{})}
|
||||
iface := &lnInterface{ready: make(chan struct{}), provider: p}
|
||||
iface.Once.Do(func() {
|
||||
go iface.init(p.NetworkController, n)
|
||||
})
|
||||
@@ -45,46 +51,26 @@ func (p *bridgeProvider) NewInterface() (network.Interface, error) {
|
||||
return iface, nil
|
||||
}
|
||||
|
||||
func (p *bridgeProvider) Release(iface network.Interface) error {
|
||||
go func() {
|
||||
if err := p.release(iface); err != nil {
|
||||
logrus.Errorf("%s", err)
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *bridgeProvider) release(iface network.Interface) error {
|
||||
li, ok := iface.(*lnInterface)
|
||||
if !ok {
|
||||
return errors.Errorf("invalid interface %T", iface)
|
||||
}
|
||||
err := li.sbx.Delete()
|
||||
if err1 := li.ep.Delete(true); err1 != nil && err == nil {
|
||||
err = err1
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type lnInterface struct {
|
||||
ep libnetwork.Endpoint
|
||||
sbx libnetwork.Sandbox
|
||||
sync.Once
|
||||
err error
|
||||
ready chan struct{}
|
||||
err error
|
||||
ready chan struct{}
|
||||
provider *bridgeProvider
|
||||
}
|
||||
|
||||
func (iface *lnInterface) init(c libnetwork.NetworkController, n libnetwork.Network) {
|
||||
defer close(iface.ready)
|
||||
id := identity.NewID()
|
||||
|
||||
ep, err := n.CreateEndpoint(id)
|
||||
ep, err := n.CreateEndpoint(id, libnetwork.CreateOptionDisableResolution())
|
||||
if err != nil {
|
||||
iface.err = err
|
||||
return
|
||||
}
|
||||
|
||||
sbx, err := c.NewSandbox(id)
|
||||
sbx, err := c.NewSandbox(id, libnetwork.OptionUseExternalKey())
|
||||
if err != nil {
|
||||
iface.err = err
|
||||
return
|
||||
@@ -99,14 +85,26 @@ func (iface *lnInterface) init(c libnetwork.NetworkController, n libnetwork.Netw
|
||||
iface.ep = ep
|
||||
}
|
||||
|
||||
func (iface *lnInterface) Set(pid int) error {
|
||||
func (iface *lnInterface) Set(s *specs.Spec) {
|
||||
<-iface.ready
|
||||
if iface.err != nil {
|
||||
return iface.err
|
||||
return
|
||||
}
|
||||
// attach netns to bridge within the container namespace, using reexec in a prestart hook
|
||||
s.Hooks = &specs.Hooks{
|
||||
Prestart: []specs.Hook{{
|
||||
Path: filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe"),
|
||||
Args: []string{"libnetwork-setkey", iface.sbx.ContainerID(), iface.provider.NetworkController.ID()},
|
||||
}},
|
||||
}
|
||||
return iface.sbx.SetKey(fmt.Sprintf("/proc/%d/ns/net", pid))
|
||||
}
|
||||
|
||||
func (iface *lnInterface) Remove(pid int) error {
|
||||
return nil
|
||||
func (iface *lnInterface) Close() error {
|
||||
<-iface.ready
|
||||
go func() {
|
||||
if err := iface.sbx.Delete(); err != nil {
|
||||
logrus.Errorf("failed to delete builder network sandbox: %v", err)
|
||||
}
|
||||
}()
|
||||
return iface.err
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
 	"github.com/moby/buildkit/executor"
 )

-func newExecutor(_ string, _ libnetwork.NetworkController) (executor.Executor, error) {
+func newExecutor(_, _ string, _ libnetwork.NetworkController) (executor.Executor, error) {
 	return &winExecutor{}, nil
 }
96  builder/builder-next/imagerefchecker/checker.go  Normal file
@@ -0,0 +1,96 @@
package imagerefchecker

import (
	"sync"

	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/moby/buildkit/cache"
)

// LayerGetter abstracts away the snapshotter
type LayerGetter interface {
	GetLayer(string) (layer.Layer, error)
}

// Opt represents the options needed to create a refchecker
type Opt struct {
	LayerGetter LayerGetter
	ImageStore  image.Store
}

// New creates new image reference checker that can be used to see if a reference
// is being used by any of the images in the image store
func New(opt Opt) cache.ExternalRefCheckerFunc {
	return func() (cache.ExternalRefChecker, error) {
		return &checker{opt: opt, layers: lchain{}, cache: map[string]bool{}}, nil
	}
}

type lchain map[layer.DiffID]lchain

func (c lchain) add(ids []layer.DiffID) {
	if len(ids) == 0 {
		return
	}
	id := ids[0]
	ch, ok := c[id]
	if !ok {
		ch = lchain{}
		c[id] = ch
	}
	ch.add(ids[1:])
}

func (c lchain) has(ids []layer.DiffID) bool {
	if len(ids) == 0 {
		return true
	}
	ch, ok := c[ids[0]]
	return ok && ch.has(ids[1:])
}

type checker struct {
	opt    Opt
	once   sync.Once
	layers lchain
	cache  map[string]bool
}

func (c *checker) Exists(key string) bool {
	if c.opt.ImageStore == nil {
		return false
	}

	c.once.Do(c.init)

	if b, ok := c.cache[key]; ok {
		return b
	}

	l, err := c.opt.LayerGetter.GetLayer(key)
	if err != nil || l == nil {
		c.cache[key] = false
		return false
	}

	ok := c.layers.has(diffIDs(l))
	c.cache[key] = ok
	return ok
}

func (c *checker) init() {
	imgs := c.opt.ImageStore.Map()

	for _, img := range imgs {
		c.layers.add(img.RootFS.DiffIDs)
	}
}

func diffIDs(l layer.Layer) []layer.DiffID {
	p := l.Parent()
	if p == nil {
		return []layer.DiffID{l.DiffID()}
	}
	return append(diffIDs(p), l.DiffID())
}
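The lchain trie above gives Exists its prefix semantics: a cache key counts as in use when its full DiffID chain is a prefix of some image's chain. A small standalone sketch of that behaviour, using plain strings instead of layer.DiffID so it runs on its own:

package main

import "fmt"

// lchain mirrors the prefix trie used by the checker above, keyed by plain
// strings here to keep the sketch self-contained.
type lchain map[string]lchain

func (c lchain) add(ids []string) {
	if len(ids) == 0 {
		return
	}
	ch, ok := c[ids[0]]
	if !ok {
		ch = lchain{}
		c[ids[0]] = ch
	}
	ch.add(ids[1:])
}

func (c lchain) has(ids []string) bool {
	if len(ids) == 0 {
		return true
	}
	ch, ok := c[ids[0]]
	return ok && ch.has(ids[1:])
}

func main() {
	chains := lchain{}
	chains.add([]string{"sha256:aaa", "sha256:bbb", "sha256:ccc"}) // one image's DiffID chain

	fmt.Println(chains.has([]string{"sha256:aaa", "sha256:bbb"})) // true: prefix of a known image chain
	fmt.Println(chains.has([]string{"sha256:bbb"}))               // false: not rooted at a known base layer
}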
51  builder/builder-next/worker/gc.go  Normal file
@@ -0,0 +1,51 @@
package worker

import (
	"math"

	"github.com/moby/buildkit/client"
)

const defaultCap int64 = 2e9 // 2GB

// tempCachePercent represents the percentage ratio of the cache size in bytes to temporarily keep for a short period of time (couple of days)
// over the total cache size in bytes. Because there is no perfect value, a mathematically pleasing one was chosen.
// The value is approximately 13.8
const tempCachePercent = math.E * math.Pi * math.Phi

// DefaultGCPolicy returns a default builder GC policy
func DefaultGCPolicy(p string, defaultKeepBytes int64) []client.PruneInfo {
	keep := defaultKeepBytes
	if defaultKeepBytes == 0 {
		keep = detectDefaultGCCap(p)
	}

	tempCacheKeepBytes := int64(math.Round(float64(keep) / 100. * float64(tempCachePercent)))
	const minTempCacheKeepBytes = 512 * 1e6 // 512MB
	if tempCacheKeepBytes < minTempCacheKeepBytes {
		tempCacheKeepBytes = minTempCacheKeepBytes
	}

	return []client.PruneInfo{
		// if build cache uses more than 512MB delete the most easily reproducible data after it has not been used for 2 days
		{
			Filter:       []string{"type==source.local,type==exec.cachemount,type==source.git.checkout"},
			KeepDuration: 48 * 3600, // 48h
			KeepBytes:    tempCacheKeepBytes,
		},
		// remove any data not used for 60 days
		{
			KeepDuration: 60 * 24 * 3600, // 60d
			KeepBytes:    keep,
		},
		// keep the unshared build cache under cap
		{
			KeepBytes: keep,
		},
		// if previous policies were insufficient start deleting internal data to keep build cache under cap
		{
			All:       true,
			KeepBytes: keep,
		},
	}
}
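To make the tempCachePercent constant concrete, here is the same keep-bytes arithmetic pulled out into a standalone snippet, assuming a hypothetical 20GB cap:

package main

import (
	"fmt"
	"math"
)

// Reproduces the keep-bytes arithmetic from DefaultGCPolicy above for an
// assumed 20GB cap (e.g. the value detectDefaultGCCap might return).
func main() {
	const tempCachePercent = math.E * math.Pi * math.Phi // ~13.8
	keep := int64(20e9)

	tempCacheKeepBytes := int64(math.Round(float64(keep) / 100. * tempCachePercent))
	const minTempCacheKeepBytes = 512 * 1e6 // 512MB floor, as in the policy
	if tempCacheKeepBytes < minTempCacheKeepBytes {
		tempCacheKeepBytes = minTempCacheKeepBytes
	}

	fmt.Println(tempCacheKeepBytes) // ~2.76e9: the 48h "easily reproducible" tier keeps ~2.76GB
	fmt.Println(keep)               // the remaining tiers keep the cache under the 20GB cap
}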
17  builder/builder-next/worker/gc_unix.go  Normal file
@@ -0,0 +1,17 @@
// +build !windows

package worker

import (
	"syscall"
)

func detectDefaultGCCap(root string) int64 {
	var st syscall.Statfs_t
	if err := syscall.Statfs(root, &st); err != nil {
		return defaultCap
	}
	diskSize := int64(st.Bsize) * int64(st.Blocks) // nolint unconvert
	avail := diskSize / 10
	return (avail/(1<<30) + 1) * 1e9 // round up
}
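Worked through for a hypothetical 200GiB filesystem, the rounding above reserves roughly a tenth of the disk, rounded up to the next whole GB:

package main

import "fmt"

// Standalone rework of the rounding in detectDefaultGCCap above, assuming a
// 200GiB filesystem: 10% of the disk, divided in GiB, rounded up, and
// expressed in decimal GB (1e9), as in the original.
func main() {
	diskSize := int64(200) * 1024 * 1024 * 1024 // 200GiB as reported by statfs
	avail := diskSize / 10                      // 10% of the disk
	keep := (avail/(1<<30) + 1) * 1e9           // 21e9 bytes
	fmt.Println(keep)
}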
7  builder/builder-next/worker/gc_windows.go  Normal file
@@ -0,0 +1,7 @@
// +build windows

package worker

func detectDefaultGCCap(root string) int64 {
	return defaultCap
}
@@ -46,6 +46,7 @@ import (
 type Opt struct {
 	ID             string
 	Labels         map[string]string
+	GCPolicy       []client.PruneInfo
 	SessionManager *session.Manager
 	MetadataStore  *metadata.Store
 	Executor       executor.Executor
@@ -130,9 +131,18 @@ func (w *Worker) Platforms() []ocispec.Platform {
 	return []ocispec.Platform{platforms.DefaultSpec()}
 }

+// GCPolicy returns automatic GC Policy
+func (w *Worker) GCPolicy() []client.PruneInfo {
+	return w.Opt.GCPolicy
+}
+
 // LoadRef loads a reference by ID
-func (w *Worker) LoadRef(id string) (cache.ImmutableRef, error) {
-	return w.CacheManager.Get(context.TODO(), id)
+func (w *Worker) LoadRef(id string, hidden bool) (cache.ImmutableRef, error) {
+	var opts []cache.RefOption
+	if hidden {
+		opts = append(opts, cache.NoUpdateLastUsed)
+	}
+	return w.CacheManager.Get(context.TODO(), id, opts...)
 }

 // ResolveOp converts a LLB vertex into a LLB operation
@@ -176,8 +186,8 @@ func (w *Worker) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*cl
 }

 // Prune deletes reclaimable build cache
-func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo, info client.PruneInfo) error {
-	return w.CacheManager.Prune(ctx, ch, info)
+func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo, info ...client.PruneInfo) error {
+	return w.CacheManager.Prune(ctx, ch, info...)
 }

 // Exporter returns exporter by name
@@ -12,7 +12,6 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/builder/remotecontext"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
@@ -23,6 +22,8 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/tonistiigi/fsutil"
|
||||
fsutiltypes "github.com/tonistiigi/fsutil/types"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
"golang.org/x/sync/singleflight"
|
||||
)
|
||||
|
||||
@@ -614,7 +615,7 @@ func (s sortableCacheSources) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
func newTarsumHash(stat *fsutil.Stat) (hash.Hash, error) {
|
||||
func newTarsumHash(stat *fsutiltypes.Stat) (hash.Hash, error) {
|
||||
fi := &fsutil.StatInfo{Stat: stat}
|
||||
p := stat.Path
|
||||
if fi.IsDir() {
|
||||
|
||||
@@ -4,19 +4,34 @@ import (
 	"context"
 	"encoding/json"
+	"fmt"
+	"net/url"

 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
+	"github.com/pkg/errors"
 )

 // BuildCachePrune requests the daemon to delete unused cache data
-func (cli *Client) BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) {
+func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) {
 	if err := cli.NewVersionError("1.31", "build prune"); err != nil {
 		return nil, err
 	}

 	report := types.BuildCachePruneReport{}

-	serverResp, err := cli.post(ctx, "/build/prune", nil, nil, nil)
+	query := url.Values{}
+	if opts.All {
+		query.Set("all", "1")
+	}
+	query.Set("keep-storage", fmt.Sprintf("%d", opts.KeepStorage))
+	filters, err := filters.ToJSON(opts.Filters)
+	if err != nil {
+		return nil, errors.Wrap(err, "prune could not marshal filters option")
+	}
+	query.Set("filters", filters)
+
+	serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil)
+
 	if err != nil {
 		return nil, err
 	}
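A usage sketch of the extended client signature; the option values and the "until" filter here are illustrative, not prescriptive:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

// Prune build cache that has not been used for 24h, keeping at most 1GB.
func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		log.Fatal(err)
	}

	f := filters.NewArgs()
	f.Add("until", "24h")

	report, err := cli.BuildCachePrune(context.Background(), types.BuildCachePruneOptions{
		All:         true,
		KeepStorage: 1e9,
		Filters:     f,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("reclaimed bytes:", report.SpaceReclaimed)
}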
@@ -413,7 +413,7 @@ func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) {
|
||||
func (cli *Client) Dialer() func(context.Context) (net.Conn, error) {
|
||||
return func(ctx context.Context) (net.Conn, error) {
|
||||
if transport, ok := cli.client.Transport.(*http.Transport); ok {
|
||||
if transport.DialContext != nil {
|
||||
if transport.DialContext != nil && transport.TLSClientConfig == nil {
|
||||
return transport.DialContext(ctx, cli.proto, cli.addr)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -86,7 +86,7 @@ type DistributionAPIClient interface {
|
||||
// ImageAPIClient defines API client methods for the images
|
||||
type ImageAPIClient interface {
|
||||
ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
|
||||
BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error)
|
||||
BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error)
|
||||
BuildCancel(ctx context.Context, id string) error
|
||||
ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
|
||||
ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error)
|
||||
|
||||
@@ -195,10 +195,18 @@ func (cli *Client) checkResponseErr(serverResp serverResponse) error {
 		return nil
 	}

-	body, err := ioutil.ReadAll(serverResp.body)
+	bodyMax := 1 * 1024 * 1024 // 1 MiB
+	bodyR := &io.LimitedReader{
+		R: serverResp.body,
+		N: int64(bodyMax),
+	}
+	body, err := ioutil.ReadAll(bodyR)
 	if err != nil {
 		return err
 	}
+	if bodyR.N == 0 {
+		return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), bodyMax, serverResp.reqURL)
+	}
 	if len(body) == 0 {
 		return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL)
 	}
@@ -5,12 +5,14 @@ import (
 	"context"
 	"fmt"
+	"io/ioutil"
+	"math/rand"
 	"net/http"
 	"strings"
 	"testing"

 	"github.com/docker/docker/api/types"
 	"gotest.tools/assert"
 	is "gotest.tools/assert/cmp"
 )

 // TestSetHostHeader should set fake host for local communications, set real host
@@ -87,3 +89,18 @@ func TestPlainTextError(t *testing.T) {
 		t.Fatalf("expected a Server Error, got %v", err)
 	}
 }
+
+func TestInfiniteError(t *testing.T) {
+	infinitR := rand.New(rand.NewSource(42))
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			resp := &http.Response{StatusCode: http.StatusInternalServerError}
+			resp.Header = http.Header{}
+			resp.Body = ioutil.NopCloser(infinitR)
+			return resp, nil
+		}),
+	}
+
+	_, err := client.Ping(context.Background())
+	assert.Check(t, is.ErrorContains(err, "request returned Internal Server Error"))
+}
@@ -10,6 +10,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
containerddefaults "github.com/containerd/containerd/defaults"
|
||||
"github.com/docker/distribution/uuid"
|
||||
"github.com/docker/docker/api"
|
||||
apiserver "github.com/docker/docker/api/server"
|
||||
@@ -27,7 +28,6 @@ import (
|
||||
swarmrouter "github.com/docker/docker/api/server/router/swarm"
|
||||
systemrouter "github.com/docker/docker/api/server/router/system"
|
||||
"github.com/docker/docker/api/server/router/volume"
|
||||
"github.com/docker/docker/api/types"
|
||||
buildkit "github.com/docker/docker/builder/builder-next"
|
||||
"github.com/docker/docker/builder/dockerfile"
|
||||
"github.com/docker/docker/builder/fscache"
|
||||
@@ -141,22 +141,25 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
if cli.Config.ContainerdAddr == "" && runtime.GOOS != "windows" {
|
||||
opts, err := cli.getContainerdDaemonOpts()
|
||||
if err != nil {
|
||||
cancel()
|
||||
return fmt.Errorf("Failed to generate containerd options: %v", err)
|
||||
if !systemContainerdRunning() {
|
||||
opts, err := cli.getContainerdDaemonOpts()
|
||||
if err != nil {
|
||||
cancel()
|
||||
return fmt.Errorf("Failed to generate containerd options: %v", err)
|
||||
}
|
||||
|
||||
r, err := supervisor.Start(ctx, filepath.Join(cli.Config.Root, "containerd"), filepath.Join(cli.Config.ExecRoot, "containerd"), opts...)
|
||||
if err != nil {
|
||||
cancel()
|
||||
return fmt.Errorf("Failed to start containerd: %v", err)
|
||||
}
|
||||
cli.Config.ContainerdAddr = r.Address()
|
||||
|
||||
// Try to wait for containerd to shutdown
|
||||
defer r.WaitTimeout(10 * time.Second)
|
||||
} else {
|
||||
cli.Config.ContainerdAddr = containerddefaults.DefaultAddress
|
||||
}
|
||||
|
||||
r, err := supervisor.Start(ctx, filepath.Join(cli.Config.Root, "containerd"), filepath.Join(cli.Config.ExecRoot, "containerd"), opts...)
|
||||
if err != nil {
|
||||
cancel()
|
||||
return fmt.Errorf("Failed to start containerd: %v", err)
|
||||
}
|
||||
|
||||
cli.Config.ContainerdAddr = r.Address()
|
||||
|
||||
// Try to wait for containerd to shutdown
|
||||
defer r.WaitTimeout(10 * time.Second)
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
@@ -253,14 +256,14 @@ type routerOptions struct {
|
||||
sessionManager *session.Manager
|
||||
buildBackend *buildbackend.Backend
|
||||
buildCache *fscache.FSCache // legacy
|
||||
features *map[string]bool
|
||||
buildkit *buildkit.Builder
|
||||
builderVersion types.BuilderVersion
|
||||
daemon *daemon.Daemon
|
||||
api *apiserver.Server
|
||||
cluster *cluster.Cluster
|
||||
}
|
||||
|
||||
func newRouterOptions(config *config.Config, daemon *daemon.Daemon) (routerOptions, error) {
|
||||
func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) {
|
||||
opts := routerOptions{}
|
||||
sm, err := session.NewManager()
|
||||
if err != nil {
|
||||
@@ -281,39 +284,35 @@ func newRouterOptions(config *config.Config, daemon *daemon.Daemon) (routerOptio
|
||||
return opts, errors.Wrap(err, "failed to create fscache")
|
||||
}
|
||||
|
||||
manager, err := dockerfile.NewBuildManager(daemon.BuilderBackend(), sm, buildCache, daemon.IdentityMapping())
|
||||
manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), sm, buildCache, d.IdentityMapping())
|
||||
if err != nil {
|
||||
return opts, err
|
||||
}
|
||||
cgroupParent := newCgroupParent(config)
|
||||
bk, err := buildkit.New(buildkit.Opt{
|
||||
SessionManager: sm,
|
||||
Root: filepath.Join(config.Root, "buildkit"),
|
||||
Dist: daemon.DistributionServices(),
|
||||
NetworkController: daemon.NetworkController(),
|
||||
SessionManager: sm,
|
||||
Root: filepath.Join(config.Root, "buildkit"),
|
||||
Dist: d.DistributionServices(),
|
||||
NetworkController: d.NetworkController(),
|
||||
DefaultCgroupParent: cgroupParent,
|
||||
ResolverOpt: d.NewResolveOptionsFunc(),
|
||||
BuilderConfig: config.Builder,
|
||||
})
|
||||
if err != nil {
|
||||
return opts, err
|
||||
}
|
||||
|
||||
bb, err := buildbackend.NewBackend(daemon.ImageService(), manager, buildCache, bk)
|
||||
bb, err := buildbackend.NewBackend(d.ImageService(), manager, buildCache, bk)
|
||||
if err != nil {
|
||||
return opts, errors.Wrap(err, "failed to create buildmanager")
|
||||
}
|
||||
var bv types.BuilderVersion
|
||||
if v, ok := config.Features["buildkit"]; ok {
|
||||
if v {
|
||||
bv = types.BuilderBuildKit
|
||||
} else {
|
||||
bv = types.BuilderV1
|
||||
}
|
||||
}
|
||||
return routerOptions{
|
||||
sessionManager: sm,
|
||||
buildBackend: bb,
|
||||
buildCache: buildCache,
|
||||
buildkit: bk,
|
||||
builderVersion: bv,
|
||||
daemon: daemon,
|
||||
features: d.Features(),
|
||||
daemon: d,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -486,9 +485,9 @@ func initRouter(opts routerOptions) {
|
||||
checkpointrouter.NewRouter(opts.daemon, decoder),
|
||||
container.NewRouter(opts.daemon, decoder),
|
||||
image.NewRouter(opts.daemon.ImageService()),
|
||||
systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildCache, opts.buildkit, opts.builderVersion),
|
||||
systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildCache, opts.buildkit, opts.features),
|
||||
volume.NewRouter(opts.daemon.VolumesService()),
|
||||
build.NewRouter(opts.buildBackend, opts.daemon, opts.builderVersion),
|
||||
build.NewRouter(opts.buildBackend, opts.daemon, opts.features),
|
||||
sessionrouter.NewRouter(opts.sessionManager),
|
||||
swarmrouter.NewRouter(opts.cluster),
|
||||
pluginrouter.NewRouter(opts.daemon.PluginManager()),
|
||||
@@ -666,3 +665,8 @@ func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGette
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func systemContainerdRunning() bool {
|
||||
_, err := os.Lstat(containerddefaults.DefaultAddress)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
 	"github.com/containerd/containerd/runtime/v1/linux"
 	"github.com/docker/docker/cmd/dockerd/hack"
+	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/libcontainerd/supervisor"
 	"github.com/docker/libnetwork/portallocator"
 	"golang.org/x/sys/unix"
@@ -107,3 +108,18 @@ func wrapListeners(proto string, ls []net.Listener) []net.Listener {
 	}
 	return ls
 }
+
+func newCgroupParent(config *config.Config) string {
+	cgroupParent := "docker"
+	useSystemd := daemon.UsingSystemd(config)
+	if useSystemd {
+		cgroupParent = "system.slice"
+	}
+	if config.CgroupParent != "" {
+		cgroupParent = config.CgroupParent
+	}
+	if useSystemd {
+		cgroupParent = cgroupParent + ":" + "docker" + ":"
+	}
+	return cgroupParent
+}
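The resulting cgroup parents differ between the cgroupfs and systemd drivers; a standalone rework of the same decision table, with the systemd check stubbed out, shows the strings the buildkit builder ends up with:

package main

import "fmt"

// Reworks newCgroupParent's logic with the systemd detection replaced by a
// plain bool, purely to show the resulting strings.
func cgroupParent(useSystemd bool, configured string) string {
	parent := "docker"
	if useSystemd {
		parent = "system.slice"
	}
	if configured != "" {
		parent = configured
	}
	if useSystemd {
		parent = parent + ":" + "docker" + ":"
	}
	return parent
}

func main() {
	fmt.Println(cgroupParent(false, ""))            // "docker" (cgroupfs driver)
	fmt.Println(cgroupParent(true, ""))             // "system.slice:docker:" (systemd driver)
	fmt.Println(cgroupParent(true, "custom.slice")) // "custom.slice:docker:"
}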
@@ -6,6 +6,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/daemon/config"
|
||||
"github.com/docker/docker/libcontainerd/supervisor"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/windows"
|
||||
@@ -83,3 +84,7 @@ func allocateDaemonPort(addr string) error {
|
||||
func wrapListeners(proto string, ls []net.Listener) []net.Listener {
|
||||
return ls
|
||||
}
|
||||
|
||||
func newCgroupParent(config *config.Config) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
@@ -265,8 +265,11 @@ flags=(
|
||||
CGROUP_HUGETLB
|
||||
NET_CLS_CGROUP $netprio
|
||||
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED
|
||||
IP_NF_TARGET_REDIRECT
|
||||
IP_VS
|
||||
IP_VS_NFCT
|
||||
IP_VS_PROTO_TCP
|
||||
IP_VS_PROTO_UDP
|
||||
IP_VS_RR
|
||||
)
|
||||
check_flags "${flags[@]}"
|
||||
|
||||
@@ -31,7 +31,7 @@ bundle_files(){
|
||||
echo $BUNDLE/binary-daemon/$f
|
||||
fi
|
||||
done
|
||||
for f in docker-containerd docker-containerd-ctr docker-containerd-shim docker-init docker-runc; do
|
||||
for f in containerd ctr containerd-shim docker-init runc; do
|
||||
echo $BUNDLE/binary-daemon/$f
|
||||
done
|
||||
if [ -d $BUNDLE/dynbinary-client ]; then
|
||||
|
||||
@@ -123,7 +123,7 @@ func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.Attach
|
||||
return logger.ErrReadLogsNotSupported{}
|
||||
}
|
||||
logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1})
|
||||
defer logs.Close()
|
||||
defer logs.ConsumerGone()
|
||||
|
||||
LogLoop:
|
||||
for {
|
||||
|
||||
@@ -41,6 +41,7 @@ package cluster // import "github.com/docker/docker/daemon/cluster"
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -67,9 +68,10 @@ const stateFile = "docker-state.json"
|
||||
const defaultAddr = "0.0.0.0:2377"
|
||||
|
||||
const (
|
||||
initialReconnectDelay = 100 * time.Millisecond
|
||||
maxReconnectDelay = 30 * time.Second
|
||||
contextPrefix = "com.docker.swarm"
|
||||
initialReconnectDelay = 100 * time.Millisecond
|
||||
maxReconnectDelay = 30 * time.Second
|
||||
contextPrefix = "com.docker.swarm"
|
||||
defaultRecvSizeForListResponse = math.MaxInt32 // the max recv limit grpc <1.4.0
|
||||
)
|
||||
|
||||
// NetworkSubnetsProvider exposes functions for retrieving the subnets
|
||||
|
||||
@@ -3,6 +3,7 @@ package cluster // import "github.com/docker/docker/daemon/cluster"
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -87,6 +88,41 @@ func (c *Cluster) resolveAdvertiseAddr(advertiseAddr, listenAddrPort string) (st
 	return systemAddr.String(), listenAddrPort, nil
 }

+// validateDefaultAddrPool validates default address pool
+// it also strips white space from the string before validation
+func validateDefaultAddrPool(defaultAddrPool []string, size uint32) error {
+	if defaultAddrPool == nil {
+		// defaultAddrPool is not defined
+		return nil
+	}
+	//if size is not set, then we use default value 24
+	if size == 0 {
+		size = 24
+	}
+	// We allow max value as 29. We can have 8 IP addresses for max value 29
+	// If we allow 30, then we will get only 4 IP addresses. But with latest
+	// libnetwork LB scale implementation, we use total of 4 IP addresses for internal use.
+	// Hence keeping 29 as max value, we will have 8 IP addresses. This will be
+	// smallest subnet that can be used in overlay network.
+	if size > 29 {
+		return fmt.Errorf("subnet size is out of range: %d", size)
+	}
+	for i := range defaultAddrPool {
+		// trim leading and trailing white spaces
+		defaultAddrPool[i] = strings.TrimSpace(defaultAddrPool[i])
+		_, b, err := net.ParseCIDR(defaultAddrPool[i])
+		if err != nil {
+			return fmt.Errorf("invalid base pool %s: %v", defaultAddrPool[i], err)
+		}
+		ones, _ := b.Mask.Size()
+		if size < uint32(ones) {
+			return fmt.Errorf("invalid CIDR: %q. Subnet size is too small for pool: %d", defaultAddrPool[i], size)
+		}
+	}
+
+	return nil
+}
+
 func resolveDataPathAddr(dataPathAddr string) (string, error) {
 	if dataPathAddr == "" {
 		// dataPathAddr is not defined
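The core of the validation is the prefix-length comparison: the requested per-network subnet size must be at least as long as the pool's own prefix. A standalone sketch of that check, with illustrative pools:

package main

import (
	"fmt"
	"net"
)

// check mirrors the CIDR/size comparison in validateDefaultAddrPool above.
func check(pool string, size uint32) error {
	_, b, err := net.ParseCIDR(pool)
	if err != nil {
		return fmt.Errorf("invalid base pool %s: %v", pool, err)
	}
	ones, _ := b.Mask.Size()
	if size < uint32(ones) {
		return fmt.Errorf("invalid CIDR: %q. Subnet size is too small for pool: %d", pool, size)
	}
	return nil
}

func main() {
	fmt.Println(check("10.20.0.0/16", 24)) // <nil>: /24 subnets carved from a /16 pool
	fmt.Println(check("10.20.0.0/24", 16)) // error: cannot carve /16 subnets out of a /24 pool
}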
@@ -3,7 +3,6 @@ package cluster // import "github.com/docker/docker/daemon/cluster"
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
@@ -14,6 +13,7 @@ import (
|
||||
"github.com/docker/docker/daemon/cluster/executor/container"
|
||||
lncluster "github.com/docker/libnetwork/cluster"
|
||||
swarmapi "github.com/docker/swarmkit/api"
|
||||
"github.com/docker/swarmkit/manager/allocator/cnmallocator"
|
||||
swarmnode "github.com/docker/swarmkit/node"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -115,12 +115,6 @@ func (n *nodeRunner) start(conf nodeStartConfig) error {
|
||||
joinAddr = conf.RemoteAddr
|
||||
}
|
||||
|
||||
var defaultAddrPool []*net.IPNet
|
||||
for _, address := range conf.DefaultAddressPool {
|
||||
if _, b, err := net.ParseCIDR(address); err == nil {
|
||||
defaultAddrPool = append(defaultAddrPool, b)
|
||||
}
|
||||
}
|
||||
// Hostname is not set here. Instead, it is obtained from
|
||||
// the node description that is reported periodically
|
||||
swarmnodeConfig := swarmnode.Config{
|
||||
@@ -128,11 +122,13 @@ func (n *nodeRunner) start(conf nodeStartConfig) error {
|
||||
ListenControlAPI: control,
|
||||
ListenRemoteAPI: conf.ListenAddr,
|
||||
AdvertiseRemoteAPI: conf.AdvertiseAddr,
|
||||
DefaultAddrPool: defaultAddrPool,
|
||||
SubnetSize: int(conf.SubnetSize),
|
||||
JoinAddr: joinAddr,
|
||||
StateDir: n.cluster.root,
|
||||
JoinToken: conf.joinToken,
|
||||
NetworkConfig: &cnmallocator.NetworkConfig{
|
||||
DefaultAddrPool: conf.DefaultAddressPool,
|
||||
SubnetSize: conf.SubnetSize,
|
||||
},
|
||||
JoinAddr: joinAddr,
|
||||
StateDir: n.cluster.root,
|
||||
JoinToken: conf.joinToken,
|
||||
Executor: container.NewExecutor(
|
||||
n.cluster.config.Backend,
|
||||
n.cluster.config.PluginBackend,
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
gogotypes "github.com/gogo/protobuf/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// GetServices returns all services of a managed swarm cluster.
|
||||
@@ -67,7 +68,9 @@ func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Serv
|
||||
|
||||
r, err := state.controlClient.ListServices(
|
||||
ctx,
|
||||
&swarmapi.ListServicesRequest{Filters: filters})
|
||||
&swarmapi.ListServicesRequest{Filters: filters},
|
||||
grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -92,6 +92,10 @@ func (c *Cluster) Init(req types.InitRequest) (string, error) {
|
||||
}
|
||||
}
|
||||
|
||||
//Validate Default Address Pool input
|
||||
if err := validateDefaultAddrPool(req.DefaultAddrPool, req.SubnetSize); err != nil {
|
||||
return "", err
|
||||
}
|
||||
nr, err := c.newNodeRunner(nodeStartConfig{
|
||||
forceNewCluster: req.ForceNewCluster,
|
||||
autolock: req.AutoLockManagers,
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
types "github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/daemon/cluster/convert"
|
||||
swarmapi "github.com/docker/swarmkit/api"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// GetTasks returns a list of tasks matching the filter options.
|
||||
@@ -53,7 +54,9 @@ func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, erro
|
||||
|
||||
r, err = state.controlClient.ListTasks(
|
||||
ctx,
|
||||
&swarmapi.ListTasksRequest{Filters: filters})
|
||||
&swarmapi.ListTasksRequest{Filters: filters},
|
||||
grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
|
||||
)
|
||||
return err
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
|
||||
22  daemon/config/builder.go  Normal file
@@ -0,0 +1,22 @@
package config

import "github.com/docker/docker/api/types/filters"

// BuilderGCRule represents a GC rule for buildkit cache
type BuilderGCRule struct {
	All         bool         `json:",omitempty"`
	Filter      filters.Args `json:",omitempty"`
	KeepStorage string       `json:",omitempty"`
}

// BuilderGCConfig contains GC config for a buildkit builder
type BuilderGCConfig struct {
	Enabled            bool            `json:",omitempty"`
	Policy             []BuilderGCRule `json:",omitempty"`
	DefaultKeepStorage string          `json:",omitempty"`
}

// BuilderConfig contains config for the builder
type BuilderConfig struct {
	GC BuilderGCConfig `json:",omitempty"`
}
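These structs are what the daemon decodes the "builder" section of daemon.json into. A self-contained sketch with an illustrative fragment (filter rules omitted here, since filters.Args carries its own JSON encoding; the sizes and flags are assumptions):

package main

import (
	"encoding/json"
	"fmt"
)

// Illustrative daemon.json fragment in the shape implied by the structs above.
const sample = `{
  "builder": {
    "GC": {
      "Enabled": true,
      "DefaultKeepStorage": "20GB",
      "Policy": [
        {"KeepStorage": "10GB", "All": false},
        {"KeepStorage": "50GB", "All": true}
      ]
    }
  }
}`

type builderGCRule struct {
	All         bool   `json:",omitempty"`
	KeepStorage string `json:",omitempty"`
}

type builderGCConfig struct {
	Enabled            bool            `json:",omitempty"`
	Policy             []builderGCRule `json:",omitempty"`
	DefaultKeepStorage string          `json:",omitempty"`
}

func main() {
	var cfg struct {
		Builder struct {
			GC builderGCConfig `json:",omitempty"`
		} `json:"builder"`
	}
	if err := json.Unmarshal([]byte(sample), &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg.Builder.GC)
}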
@@ -55,6 +55,7 @@ var flatOptions = map[string]bool{
|
||||
"runtimes": true,
|
||||
"default-ulimits": true,
|
||||
"features": true,
|
||||
"builder": true,
|
||||
}
|
||||
|
||||
// skipValidateOptions contains configuration keys
|
||||
@@ -62,6 +63,17 @@ var flatOptions = map[string]bool{
|
||||
// for unknown flag validation.
|
||||
var skipValidateOptions = map[string]bool{
|
||||
"features": true,
|
||||
"builder": true,
|
||||
}
|
||||
|
||||
// skipDuplicates contains configuration keys that
|
||||
// will be skipped when checking duplicated
|
||||
// configuration field defined in both daemon
|
||||
// config file and from dockerd cli flags.
|
||||
// This allows some configurations to be merged
|
||||
// during the parsing.
|
||||
var skipDuplicates = map[string]bool{
|
||||
"runtimes": true,
|
||||
}
|
||||
|
||||
// LogConfig represents the default log configuration.
|
||||
@@ -215,6 +227,8 @@ type CommonConfig struct {
|
||||
// Features contains a list of feature key value pairs indicating what features are enabled or disabled.
|
||||
// If a certain feature doesn't appear in this list then it's unset (i.e. neither true nor false).
|
||||
Features map[string]bool `json:"features,omitempty"`
|
||||
|
||||
Builder BuilderConfig `json:"builder,omitempty"`
|
||||
}
|
||||
|
||||
// IsValueSet returns true if a configuration value
|
||||
@@ -491,13 +505,13 @@ func findConfigurationConflicts(config map[string]interface{}, flags *pflag.Flag
|
||||
duplicatedConflicts := func(f *pflag.Flag) {
|
||||
// search option name in the json configuration payload if the value is a named option
|
||||
if namedOption, ok := f.Value.(opts.NamedOption); ok {
|
||||
if optsValue, ok := config[namedOption.Name()]; ok {
|
||||
if optsValue, ok := config[namedOption.Name()]; ok && !skipDuplicates[namedOption.Name()] {
|
||||
conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
|
||||
}
|
||||
} else {
|
||||
// search flag name in the json configuration payload
|
||||
for _, name := range []string{f.Name, f.Shorthand} {
|
||||
if value, ok := config[name]; ok {
|
||||
if value, ok := config[name]; ok && !skipDuplicates[name] {
|
||||
conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
|
||||
break
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net"
|
||||
"os"
|
||||
"path"
|
||||
@@ -23,6 +24,8 @@ import (
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/defaults"
|
||||
"github.com/containerd/containerd/pkg/dialer"
|
||||
"github.com/containerd/containerd/remotes/docker"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
containertypes "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
@@ -36,6 +39,8 @@ import (
|
||||
"github.com/docker/docker/daemon/logger"
|
||||
"github.com/docker/docker/daemon/network"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/moby/buildkit/util/resolver"
|
||||
"github.com/moby/buildkit/util/tracing"
|
||||
"github.com/sirupsen/logrus"
|
||||
// register graph drivers
|
||||
_ "github.com/docker/docker/daemon/graphdriver/register"
|
||||
@@ -136,6 +141,62 @@ func (daemon *Daemon) HasExperimental() bool {
 	return daemon.configStore != nil && daemon.configStore.Experimental
 }

+// Features returns the features map from configStore
+func (daemon *Daemon) Features() *map[string]bool {
+	return &daemon.configStore.Features
+}
+
+// NewResolveOptionsFunc returns a call back function to resolve "registry-mirrors" and
+// "insecure-registries" for buildkit
+func (daemon *Daemon) NewResolveOptionsFunc() resolver.ResolveOptionsFunc {
+	return func(ref string) docker.ResolverOptions {
+		var (
+			registryKey = "docker.io"
+			mirrors     = make([]string, len(daemon.configStore.Mirrors))
+			m           = map[string]resolver.RegistryConf{}
+		)
+		// must trim "https://" or "http://" prefix
+		for i, v := range daemon.configStore.Mirrors {
+			v = strings.TrimPrefix(v, "https://")
+			v = strings.TrimPrefix(v, "http://")
+			mirrors[i] = v
+		}
+		// set "registry-mirrors"
+		m[registryKey] = resolver.RegistryConf{Mirrors: mirrors}
+		// set "insecure-registries"
+		for _, v := range daemon.configStore.InsecureRegistries {
+			v = strings.TrimPrefix(v, "http://")
+			m[v] = resolver.RegistryConf{
+				PlainHTTP: true,
+			}
+		}
+		def := docker.ResolverOptions{
+			Client: tracing.DefaultClient,
+		}
+
+		parsed, err := reference.ParseNormalizedNamed(ref)
+		if err != nil {
+			return def
+		}
+		host := reference.Domain(parsed)
+
+		c, ok := m[host]
+		if !ok {
+			return def
+		}
+
+		if len(c.Mirrors) > 0 {
+			def.Host = func(string) (string, error) {
+				return c.Mirrors[rand.Intn(len(c.Mirrors))], nil
+			}
+		}
+
+		def.PlainHTTP = c.PlainHTTP
+
+		return def
+	}
+}
+
 func (daemon *Daemon) restore() error {
 	containers := make(map[string]*container.Container)

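The interesting part of NewResolveOptionsFunc is how daemon configuration is folded into a per-host resolver map: registry-mirrors attach to "docker.io", insecure-registries are marked plain-HTTP. A standalone rework of just that mapping, with illustrative hostnames:

package main

import (
	"fmt"
	"strings"
)

// registryConf stands in for resolver.RegistryConf to keep the sketch
// self-contained; the field names match the ones used above.
type registryConf struct {
	Mirrors   []string
	PlainHTTP bool
}

// buildMap reproduces the host map construction from the function above.
func buildMap(mirrors, insecure []string) map[string]registryConf {
	m := map[string]registryConf{}
	trimmed := make([]string, len(mirrors))
	for i, v := range mirrors {
		v = strings.TrimPrefix(v, "https://")
		v = strings.TrimPrefix(v, "http://")
		trimmed[i] = v
	}
	m["docker.io"] = registryConf{Mirrors: trimmed}
	for _, v := range insecure {
		v = strings.TrimPrefix(v, "http://")
		m[v] = registryConf{PlainHTTP: true}
	}
	return m
}

func main() {
	m := buildMap(
		[]string{"https://mirror.example.com"},
		[]string{"registry.internal:5000"},
	)
	fmt.Printf("%+v\n", m)
	// map[docker.io:{Mirrors:[mirror.example.com] PlainHTTP:false}
	//     registry.internal:5000:{Mirrors:[] PlainHTTP:true}]
}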
@@ -54,11 +54,11 @@ import (
|
||||
const (
|
||||
// DefaultShimBinary is the default shim to be used by containerd if none
|
||||
// is specified
|
||||
DefaultShimBinary = "docker-containerd-shim"
|
||||
DefaultShimBinary = "containerd-shim"
|
||||
|
||||
// DefaultRuntimeBinary is the default runtime to be used by
|
||||
// containerd if none is specified
|
||||
DefaultRuntimeBinary = "docker-runc"
|
||||
DefaultRuntimeBinary = "runc"
|
||||
|
||||
// See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269
|
||||
linuxMinCPUShares = 2
|
||||
@@ -76,7 +76,7 @@ const (
|
||||
|
||||
// DefaultRuntimeName is the default runtime to be used by
|
||||
// containerd if none is specified
|
||||
DefaultRuntimeName = "docker-runc"
|
||||
DefaultRuntimeName = "runc"
|
||||
)
|
||||
|
||||
type containerGetter interface {
|
||||
@@ -482,14 +482,14 @@ func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysi
|
||||
}
|
||||
cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus)
|
||||
if err != nil {
|
||||
return warnings, fmt.Errorf("Invalid value %s for cpuset cpus", resources.CpusetCpus)
|
||||
return warnings, errors.Wrapf(err, "Invalid value %s for cpuset cpus", resources.CpusetCpus)
|
||||
}
|
||||
if !cpusAvailable {
|
||||
return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus)
|
||||
}
|
||||
memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems)
|
||||
if err != nil {
|
||||
return warnings, fmt.Errorf("Invalid value %s for cpuset mems", resources.CpusetMems)
|
||||
return warnings, errors.Wrapf(err, "Invalid value %s for cpuset mems", resources.CpusetMems)
|
||||
}
|
||||
if !memsAvailable {
|
||||
return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems)
|
||||
@@ -1480,7 +1480,7 @@ func (daemon *Daemon) initCgroupsPath(path string) error {
|
||||
// for the period and runtime as this limits what the children can be set to.
|
||||
daemon.initCgroupsPath(filepath.Dir(path))
|
||||
|
||||
mnt, root, err := cgroups.FindCgroupMountpointAndRoot("cpu")
|
||||
mnt, root, err := cgroups.FindCgroupMountpointAndRoot("", "cpu")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -193,12 +193,15 @@ func verifyContainerResources(resources *containertypes.Resources, isHyperv bool
|
||||
// hostconfig and config structures.
|
||||
func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) {
|
||||
warnings := []string{}
|
||||
|
||||
osv := system.GetOSVersion()
|
||||
hyperv := daemon.runAsHyperVContainer(hostConfig)
|
||||
if !hyperv && system.IsWindowsClient() && !system.IsIoTCore() {
|
||||
// @engine maintainers. This block should not be removed. It partially enforces licensing
|
||||
// restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this.
|
||||
return warnings, fmt.Errorf("Windows client operating systems only support Hyper-V containers")
|
||||
|
||||
// On RS5, we allow (but don't strictly support) process isolation on Client SKUs.
|
||||
// Prior to RS5, we don't allow process isolation on Client SKUs.
|
||||
// @engine maintainers. This block should not be removed. It partially enforces licensing
|
||||
// restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this.
|
||||
if !hyperv && system.IsWindowsClient() && osv.Build < 17763 {
|
||||
return warnings, fmt.Errorf("Windows client operating systems earlier than version 1809 can only run Hyper-V containers")
|
||||
}
|
||||
|
||||
w, err := verifyContainerResources(&hostConfig.Resources, hyperv)
|
||||
@@ -323,7 +326,8 @@ func (daemon *Daemon) initNetworkController(config *config.Config, activeSandbox
|
||||
// discover and add HNS networks to windows
|
||||
// network that exist are removed and added again
|
||||
for _, v := range hnsresponse {
|
||||
if strings.ToLower(v.Type) == "private" {
|
||||
networkTypeNorm := strings.ToLower(v.Type)
|
||||
if networkTypeNorm == "private" || networkTypeNorm == "internal" {
|
||||
continue // workaround for HNS reporting unsupported networks
|
||||
}
|
||||
var n libnetwork.Network
|
||||
@@ -591,9 +595,12 @@ func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
|
||||
// daemon to run in. This is only applicable on Windows
|
||||
func (daemon *Daemon) setDefaultIsolation() error {
|
||||
daemon.defaultIsolation = containertypes.Isolation("process")
|
||||
// On client SKUs, default to Hyper-V. Note that IoT reports as a client SKU
|
||||
// but it should not be treated as such.
|
||||
if system.IsWindowsClient() && !system.IsIoTCore() {
|
||||
osv := system.GetOSVersion()
|
||||
|
||||
// On client SKUs, default to Hyper-V. @engine maintainers. This
|
||||
// should not be removed. Ping @jhowardmsft is there are PRs to
|
||||
// to change this.
|
||||
if system.IsWindowsClient() {
|
||||
daemon.defaultIsolation = containertypes.Isolation("hyperv")
|
||||
}
|
||||
for _, option := range daemon.configStore.ExecOptions {
|
||||
@@ -612,10 +619,11 @@ func (daemon *Daemon) setDefaultIsolation() error {
|
||||
daemon.defaultIsolation = containertypes.Isolation("hyperv")
|
||||
}
|
||||
if containertypes.Isolation(val).IsProcess() {
|
||||
if system.IsWindowsClient() && !system.IsIoTCore() {
|
||||
if system.IsWindowsClient() && osv.Build < 17763 {
|
||||
// On RS5, we allow (but don't strictly support) process isolation on Client SKUs.
|
||||
// @engine maintainers. This block should not be removed. It partially enforces licensing
|
||||
// restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this.
|
||||
return fmt.Errorf("Windows client operating systems only support Hyper-V containers")
|
||||
return fmt.Errorf("Windows client operating systems earlier than version 1809 can only run Hyper-V containers")
|
||||
}
|
||||
daemon.defaultIsolation = containertypes.Isolation("process")
|
||||
}
|
||||
|
||||
@@ -29,10 +29,12 @@ import (
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/mount"
|
||||
"github.com/docker/docker/pkg/parsers"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/docker/go-units"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
@@ -81,6 +83,15 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// For some reason shared mount propagation between a container
|
||||
// and the host does not work for btrfs, and a remedy is to bind
|
||||
// mount graphdriver home to itself (even without changing the
|
||||
// propagation mode).
|
||||
err = mount.MakeMount(home)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to make %s a mount", home)
|
||||
}
|
||||
|
||||
driver := &Driver{
|
||||
home: home,
|
||||
uidMaps: uidMaps,
|
||||
@@ -158,7 +169,19 @@ func (d *Driver) GetMetadata(id string) (map[string]string, error) {
|
||||
|
||||
// Cleanup unmounts the home directory.
|
||||
func (d *Driver) Cleanup() error {
|
||||
return d.subvolDisableQuota()
|
||||
err := d.subvolDisableQuota()
|
||||
umountErr := mount.Unmount(d.home)
|
||||
|
||||
// in case we have two errors, prefer the one from disableQuota()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if umountErr != nil {
|
||||
return errors.Wrapf(umountErr, "error unmounting %s", d.home)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func free(p *C.char) {
|
||||
|
||||
@@ -27,7 +27,7 @@ type directLVMConfig struct {
|
||||
var (
|
||||
errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified")
|
||||
errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100")
|
||||
errMissingSetupDevice = errors.New("must provide device path in `dm.setup_device` in order to configure direct-lvm")
|
||||
errMissingSetupDevice = errors.New("must provide device path in `dm.directlvm_device` in order to configure direct-lvm")
|
||||
)
|
||||
|
||||
func validateLVMConfig(cfg directLVMConfig) error {
|
||||
|
||||
@@ -195,6 +195,7 @@ type Options struct {
|
||||
func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) {
|
||||
if name != "" {
|
||||
logrus.Debugf("[graphdriver] trying provided driver: %s", name) // so the logs show specified driver
|
||||
logDeprecatedWarning(name)
|
||||
return GetDriver(name, pg, config)
|
||||
}
|
||||
|
||||
@@ -232,6 +233,7 @@ func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, err
|
||||
}
|
||||
|
||||
logrus.Infof("[graphdriver] using prior storage driver: %s", name)
|
||||
logDeprecatedWarning(name)
|
||||
return driver, nil
|
||||
}
|
||||
}
|
||||
@@ -245,6 +247,7 @@ func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
logDeprecatedWarning(name)
|
||||
return driver, nil
|
||||
}
|
||||
|
||||
@@ -257,6 +260,7 @@ func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
logDeprecatedWarning(name)
|
||||
return driver, nil
|
||||
}
|
||||
return nil, fmt.Errorf("No supported storage backend found")
|
||||
@@ -305,3 +309,20 @@ func isEmptyDir(name string) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isDeprecated checks if a storage-driver is marked "deprecated"
|
||||
func isDeprecated(name string) bool {
|
||||
switch name {
|
||||
// NOTE: when deprecating a driver, update daemon.fillDriverInfo() accordingly
|
||||
case "devicemapper", "overlay":
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// logDeprecatedWarning logs a warning if the given storage-driver is marked "deprecated"
|
||||
func logDeprecatedWarning(name string) {
|
||||
if isDeprecated(name) {
|
||||
logrus.Warnf("[graphdriver] WARNING: the %s storage-driver is deprecated, and will be removed in a future release", name)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -71,20 +71,33 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/Microsoft/hcsshim/ext4/tar2ext4"
|
||||
"github.com/Microsoft/opengcs/client"
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// noreexec controls reexec functionality. Off by default, on for debugging purposes.
|
||||
var noreexec = false
|
||||
|
||||
// init registers this driver to the register. It gets initialised by the
|
||||
// function passed in the second parameter, implemented in this file.
|
||||
func init() {
|
||||
graphdriver.Register("lcow", InitDriver)
|
||||
// DOCKER_LCOW_NOREEXEC allows for inline processing which makes
|
||||
// debugging issues in the re-exec codepath significantly easier.
|
||||
if os.Getenv("DOCKER_LCOW_NOREEXEC") != "" {
|
||||
logrus.Warnf("LCOW Graphdriver is set to not re-exec. This is intended for debugging purposes only.")
|
||||
noreexec = true
|
||||
} else {
|
||||
reexec.Register("docker-lcow-tar2ext4", tar2ext4Reexec)
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
@@ -846,32 +859,72 @@ func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
|
||||
func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
|
||||
logrus.Debugf("lcowdriver: applydiff: id %s", id)
|
||||
|
||||
svm, err := d.startServiceVMIfNotRunning(id, nil, fmt.Sprintf("applydiff %s", id))
|
||||
// Log failures here as it's undiagnosable sometimes, due to a possible panic.
|
||||
// See https://github.com/moby/moby/issues/37955 for more information.
|
||||
|
||||
dest := filepath.Join(d.dataRoot, id, layerFilename)
|
||||
if !noreexec {
|
||||
cmd := reexec.Command([]string{"docker-lcow-tar2ext4", dest}...)
|
||||
stdout := bytes.NewBuffer(nil)
|
||||
stderr := bytes.NewBuffer(nil)
|
||||
cmd.Stdin = diff
|
||||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stderr
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
logrus.Warnf("lcowdriver: applydiff: id %s failed to start re-exec: %s", id, err)
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if err := cmd.Wait(); err != nil {
|
||||
logrus.Warnf("lcowdriver: applydiff: id %s failed %s", id, err)
|
||||
return 0, fmt.Errorf("re-exec error: %v: stderr: %s", err, stderr)
|
||||
}
|
||||
return strconv.ParseInt(stdout.String(), 10, 64)
|
||||
}
|
||||
// The inline case
|
||||
size, err := tar2ext4Actual(dest, diff)
|
||||
if err != nil {
|
||||
logrus.Warnf("lcowdriver: applydiff: id %s failed %s", id, err)
|
||||
}
|
||||
return size, err
|
||||
}
|
||||
|
||||
// tar2ext4Reexec is the re-exec entry point for writing a layer from a tar file
|
||||
func tar2ext4Reexec() {
|
||||
size, err := tar2ext4Actual(os.Args[1], os.Stdin)
|
||||
if err != nil {
|
||||
fmt.Fprint(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Fprint(os.Stdout, size)
|
||||
}
|
||||
|
||||
// tar2ext4Actual is the implementation of tar2ext to write a layer from a tar file.
|
||||
// It can be called through re-exec (default), or inline for debugging.
|
||||
func tar2ext4Actual(dest string, diff io.Reader) (int64, error) {
|
||||
// maxDiskSize is not relating to the sandbox size - this is the
|
||||
// maximum possible size a layer VHD generated can be from an EXT4
|
||||
// layout perspective.
|
||||
const maxDiskSize = 128 * 1024 * 1024 * 1024 // 128GB
|
||||
out, err := os.Create(dest)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer d.terminateServiceVM(id, fmt.Sprintf("applydiff %s", id), false)
|
||||
|
||||
logrus.Debugf("lcowdriver: applydiff: waiting for svm to finish booting")
|
||||
err = svm.getStartError()
|
||||
defer out.Close()
|
||||
if err := tar2ext4.Convert(
|
||||
diff,
|
||||
out,
|
||||
tar2ext4.AppendVhdFooter,
|
||||
tar2ext4.ConvertWhiteout,
|
||||
tar2ext4.MaximumDiskSize(maxDiskSize)); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
fi, err := os.Stat(dest)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("lcowdriver: applydiff: svm failed to boot: %s", err)
|
||||
}
|
||||
|
||||
// TODO @jhowardmsft - the retries are temporary to overcome platform reliability issues.
|
||||
// Obviously this will be removed as platform bugs are fixed.
|
||||
retries := 0
|
||||
for {
|
||||
retries++
|
||||
size, err := svm.config.TarToVhd(filepath.Join(d.dataRoot, id, layerFilename), diff)
|
||||
if err != nil {
|
||||
if retries <= 10 {
|
||||
continue
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
return size, err
|
||||
return 0, err
|
||||
}
|
||||
return fi.Size(), nil
|
||||
}
|
||||
|
||||
// Changes produces a list of changes between the specified layer
|
||||
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
@@ -27,7 +26,7 @@ func doesSupportNativeDiff(d string) error {
|
||||
}
|
||||
defer func() {
|
||||
if err := os.RemoveAll(td); err != nil {
|
||||
logrus.WithField("storage-driver", "overlay2").Warnf("Failed to remove check directory %v: %v", td, err)
|
||||
logger.Warnf("Failed to remove check directory %v: %v", td, err)
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -62,7 +61,7 @@ func doesSupportNativeDiff(d string) error {
|
||||
}
|
||||
defer func() {
|
||||
if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
|
||||
logrus.WithField("storage-driver", "overlay2").Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err)
|
||||
logger.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err)
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -113,7 +112,7 @@ func supportsMultipleLowerDir(d string) error {
|
||||
}
|
||||
defer func() {
|
||||
if err := os.RemoveAll(td); err != nil {
|
||||
logrus.WithField("storage-driver", "overlay2").Warnf("Failed to remove check directory %v: %v", td, err)
|
||||
logger.Warnf("Failed to remove check directory %v: %v", td, err)
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -128,7 +127,7 @@ func supportsMultipleLowerDir(d string) error {
|
||||
return errors.Wrap(err, "failed to mount overlay")
|
||||
}
|
||||
if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
|
||||
logrus.WithField("storage-driver", "overlay2").Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err)
|
||||
logger.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -106,11 +106,14 @@ type Driver struct {
|
||||
}
|
||||
|
||||
var (
|
||||
logger = logrus.WithField("storage-driver", "overlay2")
|
||||
backingFs = "<unknown>"
|
||||
projectQuotaSupported = false
|
||||
|
||||
useNaiveDiffLock sync.Once
|
||||
useNaiveDiffOnly bool
|
||||
|
||||
indexOff string
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -155,8 +158,6 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
|
||||
backingFs = fsName
|
||||
}
|
||||
|
||||
logger := logrus.WithField("storage-driver", "overlay2")
|
||||
|
||||
switch fsMagic {
|
||||
case graphdriver.FsMagicAufs, graphdriver.FsMagicEcryptfs, graphdriver.FsMagicNfsFs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs:
|
||||
logger.Errorf("'overlay2' is not supported over %s", backingFs)
|
||||
@@ -228,7 +229,18 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
|
||||
return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. Found %v", backingFs)
|
||||
}
|
||||
|
||||
logger.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported)
|
||||
// figure out whether "index=off" option is recognized by the kernel
|
||||
_, err = os.Stat("/sys/module/overlay/parameters/index")
|
||||
switch {
|
||||
case err == nil:
|
||||
indexOff = "index=off,"
|
||||
case os.IsNotExist(err):
|
||||
// old kernel, no index -- do nothing
|
||||
default:
|
||||
logger.Warnf("Unable to detect whether overlay kernel module supports index parameter: %s", err)
|
||||
}
|
||||
|
||||
logger.Debugf("backingFs=%s, projectQuotaSupported=%v, indexOff=%q", backingFs, projectQuotaSupported, indexOff)
|
||||
|
||||
return d, nil
|
||||
}
|
||||
@@ -277,14 +289,14 @@ func supportsOverlay() error {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
logrus.WithField("storage-driver", "overlay2").Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
|
||||
logger.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
|
||||
return graphdriver.ErrNotSupported
|
||||
}
|
||||
|
||||
func useNaiveDiff(home string) bool {
|
||||
useNaiveDiffLock.Do(func() {
|
||||
if err := doesSupportNativeDiff(home); err != nil {
|
||||
logrus.WithField("storage-driver", "overlay2").Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: %v", err)
|
||||
logger.Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: %v", err)
|
||||
useNaiveDiffOnly = true
|
||||
}
|
||||
})
|
||||
@@ -522,9 +534,9 @@ func (d *Driver) Remove(id string) error {
|
||||
lid, err := ioutil.ReadFile(path.Join(dir, "link"))
|
||||
if err == nil {
|
||||
if len(lid) == 0 {
|
||||
logrus.WithField("storage-driver", "overlay2").Errorf("refusing to remove empty link for layer %v", id)
|
||||
logger.Errorf("refusing to remove empty link for layer %v", id)
|
||||
} else if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil {
|
||||
logrus.WithField("storage-driver", "overlay2").Debugf("Failed to remove link: %v", err)
|
||||
logger.Debugf("Failed to remove link: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -561,11 +573,11 @@ func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr e
|
||||
if retErr != nil {
|
||||
if c := d.ctr.Decrement(mergedDir); c <= 0 {
|
||||
if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil {
|
||||
logrus.WithField("storage-driver", "overlay2").Errorf("error unmounting %v: %v", mergedDir, mntErr)
|
||||
logger.Errorf("error unmounting %v: %v", mergedDir, mntErr)
|
||||
}
|
||||
// Cleanup the created merged directory; see the comment in Put's rmdir
|
||||
if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) {
|
||||
logrus.WithField("storage-driver", "overlay2").Debugf("Failed to remove %s: %v: %v", id, rmErr, err)
|
||||
logger.Debugf("Failed to remove %s: %v: %v", id, rmErr, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -577,7 +589,7 @@ func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr e
|
||||
for i, s := range splitLowers {
|
||||
absLowers[i] = path.Join(d.home, s)
|
||||
}
|
||||
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), path.Join(dir, "diff"), path.Join(dir, "work"))
|
||||
opts := indexOff + "lowerdir=" + strings.Join(absLowers, ":") + ",upperdir=" + path.Join(dir, "diff") + ",workdir=" + path.Join(dir, "work")
|
||||
mountData := label.FormatMountLabel(opts, mountLabel)
|
||||
mount := unix.Mount
|
||||
mountTarget := mergedDir
|
||||
@@ -606,7 +618,7 @@ func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr e
|
||||
// fit within a page and relative links make the mount data much
|
||||
// smaller at the expense of requiring a fork exec to chroot.
|
||||
if len(mountData) > pageSize {
|
||||
opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work"))
|
||||
opts = indexOff + "lowerdir=" + string(lowers) + ",upperdir=" + path.Join(id, "diff") + ",workdir=" + path.Join(id, "work")
|
||||
mountData = label.FormatMountLabel(opts, mountLabel)
|
||||
if len(mountData) > pageSize {
|
||||
return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData))
|
||||
@@ -648,7 +660,6 @@ func (d *Driver) Put(id string) error {
|
||||
}
|
||||
|
||||
mountpoint := path.Join(dir, "merged")
|
||||
logger := logrus.WithField("storage-driver", "overlay2")
|
||||
if count := d.ctr.Decrement(mountpoint); count > 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -704,7 +715,7 @@ func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64
|
||||
|
||||
applyDir := d.getDiffPath(id)
|
||||
|
||||
logrus.WithField("storage-driver", "overlay2").Debugf("Applying tar in %s", applyDir)
|
||||
logger.Debugf("Applying tar in %s", applyDir)
|
||||
// Overlay doesn't need the parent id to apply the diff
|
||||
if err := untar(diff, applyDir, &archive.TarOptions{
|
||||
UIDMaps: d.uidMaps,
|
||||
@@ -742,7 +753,7 @@ func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
|
||||
}
|
||||
|
||||
diffPath := d.getDiffPath(id)
|
||||
logrus.WithField("storage-driver", "overlay2").Debugf("Tar with options on %s", diffPath)
|
||||
logger.Debugf("Tar with options on %s", diffPath)
|
||||
return archive.TarWithOptions(diffPath, &archive.TarOptions{
|
||||
Compression: archive.Uncompressed,
|
||||
UIDMaps: d.uidMaps,
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
@@ -47,7 +46,7 @@ func generateID(l int) string {
|
||||
if retryOnError(err) && retries < maxretries {
|
||||
count += n
|
||||
retries++
|
||||
logrus.Errorf("error generating version 4 uuid, retrying: %v", err)
|
||||
logger.Errorf("error generating version 4 uuid, retrying: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -23,6 +22,7 @@ import (
|
||||
"github.com/Microsoft/go-winio"
|
||||
"github.com/Microsoft/go-winio/archive/tar"
|
||||
"github.com/Microsoft/go-winio/backuptar"
|
||||
"github.com/Microsoft/go-winio/vhd"
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/docker/docker/daemon/graphdriver"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
@@ -33,6 +33,7 @@ import (
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
units "github.com/docker/go-units"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
@@ -331,7 +332,18 @@ func (d *Driver) Remove(id string) error {
|
||||
tmpID := fmt.Sprintf("%s-removing", rID)
|
||||
tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID)
|
||||
if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
if !os.IsPermission(err) {
|
||||
return err
|
||||
}
|
||||
// If permission denied, it's possible that the scratch is still mounted, an
|
||||
// artifact after a hard daemon crash for example. Worth a shot to try detaching it
|
||||
// before retrying the rename.
|
||||
if detachErr := vhd.DetachVhd(filepath.Join(layerPath, "sandbox.vhdx")); detachErr != nil {
|
||||
return errors.Wrapf(err, "failed to detach VHD: %s", detachErr)
|
||||
}
|
||||
if renameErr := os.Rename(layerPath, tmpLayerPath); renameErr != nil && !os.IsNotExist(renameErr) {
|
||||
return errors.Wrapf(err, "second rename attempt following detach failed: %s", renameErr)
|
||||
}
|
||||
}
|
||||
if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil {
|
||||
logrus.Errorf("Failed to DestroyLayer %s: %s", id, err)
|
||||
|
||||
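A hedged sketch of the retry pattern this hunk adds: if the first rename fails with a permission error (the scratch VHD may still be attached after a hard daemon crash), detach and try the rename once more. detachScratch below is a stand-in for vhd.DetachVhd on the layer's sandbox.vhdx, not the driver's actual helper:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// renameWithDetachRetry sketches the retry added above: only a permission
// error triggers the detach-and-retry path; any other failure is returned.
func renameWithDetachRetry(oldPath, newPath string, detachScratch func(string) error) error {
	err := os.Rename(oldPath, newPath)
	if err == nil || os.IsNotExist(err) {
		return nil
	}
	if !os.IsPermission(err) {
		return err
	}
	if detachErr := detachScratch(oldPath); detachErr != nil {
		return fmt.Errorf("failed to detach VHD: %s (rename error: %v)", detachErr, err)
	}
	if renameErr := os.Rename(oldPath, newPath); renameErr != nil && !os.IsNotExist(renameErr) {
		return fmt.Errorf("second rename attempt following detach failed: %s", renameErr)
	}
	return nil
}

func main() {
	dir, _ := os.MkdirTemp("", "layer")
	defer os.RemoveAll(dir)
	old := filepath.Join(dir, "abc123")
	os.Mkdir(old, 0o755)
	noopDetach := func(string) error { return nil } // real code would detach the VHD here
	fmt.Println(renameWithDetachRetry(old, old+"-removing", noopDetach))
}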
@@ -205,8 +205,6 @@ func (i *ImageService) LayerDiskUsage(ctx context.Context) (int64, error) {
|
||||
if err == nil {
|
||||
if _, ok := layerRefs[l.ChainID()]; ok {
|
||||
allLayersSize += size
|
||||
} else {
|
||||
logrus.Warnf("found leaked image layer %v", l.ChainID())
|
||||
}
|
||||
} else {
|
||||
logrus.Warnf("failed to get diff size for layer %v", l.ChainID())
|
||||
|
||||
@@ -2,6 +2,7 @@ package daemon // import "github.com/docker/docker/daemon"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
@@ -61,8 +62,8 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
|
||||
ServerVersion: dockerversion.Version,
|
||||
ClusterStore: daemon.configStore.ClusterStore,
|
||||
ClusterAdvertise: daemon.configStore.ClusterAdvertise,
|
||||
HTTPProxy: sockets.GetProxyEnv("http_proxy"),
|
||||
HTTPSProxy: sockets.GetProxyEnv("https_proxy"),
|
||||
HTTPProxy: maskCredentials(sockets.GetProxyEnv("http_proxy")),
|
||||
HTTPSProxy: maskCredentials(sockets.GetProxyEnv("https_proxy")),
|
||||
NoProxy: sockets.GetProxyEnv("no_proxy"),
|
||||
LiveRestoreEnabled: daemon.configStore.LiveRestoreEnabled,
|
||||
Isolation: daemon.defaultIsolation,
|
||||
@@ -130,6 +131,10 @@ func (daemon *Daemon) fillDriverInfo(v *types.Info) {
|
||||
if len(daemon.graphDrivers) > 1 {
|
||||
drivers += fmt.Sprintf(" (%s) ", os)
|
||||
}
|
||||
switch gd {
|
||||
case "devicemapper", "overlay":
|
||||
v.Warnings = append(v.Warnings, fmt.Sprintf("WARNING: the %s storage-driver is deprecated, and will be removed in a future release.", gd))
|
||||
}
|
||||
}
|
||||
drivers = strings.TrimSpace(drivers)
|
||||
|
||||
@@ -245,3 +250,13 @@ func operatingSystem() string {
|
||||
}
|
||||
return operatingSystem
|
||||
}
|
||||
|
||||
func maskCredentials(rawURL string) string {
	parsedURL, err := url.Parse(rawURL)
	if err != nil || parsedURL.User == nil {
		return rawURL
	}
	parsedURL.User = url.UserPassword("xxxxx", "xxxxx")
	maskedURL := parsedURL.String()
	return maskedURL
}
|
||||
|
||||
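As a quick illustration of the behaviour, here is a standalone sketch mirroring the function above; maskProxyCredentials is a local copy for demonstration, not the daemon's symbol:

package main

import (
	"fmt"
	"net/url"
)

// maskProxyCredentials is a local copy of the maskCredentials helper above:
// any userinfo in the URL is replaced with xxxxx:xxxxx before it is reported.
func maskProxyCredentials(rawURL string) string {
	parsedURL, err := url.Parse(rawURL)
	if err != nil || parsedURL.User == nil {
		return rawURL
	}
	parsedURL.User = url.UserPassword("xxxxx", "xxxxx")
	return parsedURL.String()
}

func main() {
	fmt.Println(maskProxyCredentials("http://USER:PASSWORD@proxy.example.com:80/"))
	// -> http://xxxxx:xxxxx@proxy.example.com:80/
	fmt.Println(maskProxyCredentials("http://proxy.example.com:80/"))
	// -> unchanged, no userinfo present
}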
53
daemon/info_test.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package daemon

import (
	"testing"

	"gotest.tools/assert"
)

func TestMaskURLCredentials(t *testing.T) {
	tests := []struct {
		rawURL    string
		maskedURL string
	}{
		{
			rawURL:    "",
			maskedURL: "",
		}, {
			rawURL:    "invalidURL",
			maskedURL: "invalidURL",
		}, {
			rawURL:    "http://proxy.example.com:80/",
			maskedURL: "http://proxy.example.com:80/",
		}, {
			rawURL:    "http://USER:PASSWORD@proxy.example.com:80/",
			maskedURL: "http://xxxxx:xxxxx@proxy.example.com:80/",
		}, {
			rawURL:    "http://PASSWORD:PASSWORD@proxy.example.com:80/",
			maskedURL: "http://xxxxx:xxxxx@proxy.example.com:80/",
		}, {
			rawURL:    "http://USER:@proxy.example.com:80/",
			maskedURL: "http://xxxxx:xxxxx@proxy.example.com:80/",
		}, {
			rawURL:    "http://:PASSWORD@proxy.example.com:80/",
			maskedURL: "http://xxxxx:xxxxx@proxy.example.com:80/",
		}, {
			rawURL:    "http://USER@docker:password@proxy.example.com:80/",
			maskedURL: "http://xxxxx:xxxxx@proxy.example.com:80/",
		}, {
			rawURL:    "http://USER%40docker:password@proxy.example.com:80/",
			maskedURL: "http://xxxxx:xxxxx@proxy.example.com:80/",
		}, {
			rawURL:    "http://USER%40docker:pa%3Fsword@proxy.example.com:80/",
			maskedURL: "http://xxxxx:xxxxx@proxy.example.com:80/",
		}, {
			rawURL:    "http://USER%40docker:pa%3Fsword@proxy.example.com:80/hello%20world",
			maskedURL: "http://xxxxx:xxxxx@proxy.example.com:80/hello%20world",
		},
	}
	for _, test := range tests {
		maskedURL := maskCredentials(test.rawURL)
		assert.Equal(t, maskedURL, test.maskedURL)
	}
}
|
||||
@@ -29,7 +29,6 @@ func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo)
|
||||
v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName()
|
||||
v.InitBinary = daemon.configStore.GetInitPath()
|
||||
|
||||
v.RuncCommit.Expected = dockerversion.RuncCommitID
|
||||
defaultRuntimeBinary := daemon.configStore.GetRuntime(v.DefaultRuntime).Path
|
||||
if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil {
|
||||
parts := strings.Split(strings.TrimSpace(string(rv)), "\n")
|
||||
@@ -49,7 +48,10 @@ func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo)
|
||||
v.RuncCommit.ID = "N/A"
|
||||
}
|
||||
|
||||
v.ContainerdCommit.Expected = dockerversion.ContainerdCommitID
|
||||
// runc is now shipped as a separate package. Set "expected" to same value
|
||||
// as "ID" to prevent clients from reporting a version-mismatch
|
||||
v.RuncCommit.Expected = v.RuncCommit.ID
|
||||
|
||||
if rv, err := daemon.containerd.Version(context.Background()); err == nil {
|
||||
v.ContainerdCommit.ID = rv.Revision
|
||||
} else {
|
||||
@@ -57,6 +59,10 @@ func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo)
|
||||
v.ContainerdCommit.ID = "N/A"
|
||||
}
|
||||
|
||||
// containerd is now shipped as a separate package. Set "expected" to same
|
||||
// value as "ID" to prevent clients from reporting a version-mismatch
|
||||
v.ContainerdCommit.Expected = v.ContainerdCommit.ID
|
||||
|
||||
defaultInitBinary := daemon.configStore.GetInitPath()
|
||||
if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil {
|
||||
ver, err := parseInitVersion(string(rv))
|
||||
|
||||
@@ -146,7 +146,8 @@ func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContex
|
||||
continue
|
||||
}
|
||||
for _, eachName := range idNames {
|
||||
if ctx.filters.Match("name", strings.TrimPrefix(eachName, "/")) {
|
||||
// match both on container name with, and without slash-prefix
|
||||
if ctx.filters.Match("name", eachName) || ctx.filters.Match("name", strings.TrimPrefix(eachName, "/")) {
|
||||
matches[id] = true
|
||||
}
|
||||
}
|
||||
@@ -429,7 +430,7 @@ func includeContainerInList(container *container.Snapshot, ctx *listContext) ite
|
||||
}
|
||||
|
||||
// Do not include container if the name doesn't match
|
||||
if !ctx.filters.Match("name", strings.TrimPrefix(container.Name, "/")) {
|
||||
if !ctx.filters.Match("name", container.Name) && !ctx.filters.Match("name", strings.TrimPrefix(container.Name, "/")) {
|
||||
return excludeContainer
|
||||
}
|
||||
|
||||
|
||||
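To see why both forms are checked in the list hunks above, here is a small standalone sketch. It uses regexp directly; the daemon goes through its filters.Args matching, so this is only an approximation of the semantics:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Containers are stored with a leading slash ("/mycontainer"). A name filter
// is a regexp, so "^a" and "^/a" only both work if we match against the name
// with and without the slash prefix, as the change above does.
func matchesNameFilter(pattern, storedName string) bool {
	re := regexp.MustCompile(pattern)
	return re.MatchString(storedName) || re.MatchString(strings.TrimPrefix(storedName, "/"))
}

func main() {
	fmt.Println(matchesNameFilter("^a", "/abc"))  // true: matches after the slash is trimmed
	fmt.Println(matchesNameFilter("^/a", "/abc")) // true: matches the stored form
	fmt.Println(matchesNameFilter("^b", "/abc"))  // false
}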
@@ -4,7 +4,6 @@ import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
@@ -35,6 +34,7 @@ func TestMain(m *testing.M) {
|
||||
// work against it. It takes in a pointer to Daemon so that
|
||||
// minor operations are not repeated by the caller
|
||||
func setupContainerWithName(t *testing.T, name string, daemon *Daemon) *container.Container {
|
||||
t.Helper()
|
||||
var (
|
||||
id = uuid.New()
|
||||
computedImageID = digest.FromString(id)
|
||||
@@ -46,6 +46,9 @@ func setupContainerWithName(t *testing.T, name string, daemon *Daemon) *containe
|
||||
|
||||
c := container.NewBaseContainer(id, cRoot)
|
||||
// these are for passing includeContainerInList
|
||||
if name[0] != '/' {
|
||||
name = "/" + name
|
||||
}
|
||||
c.Name = name
|
||||
c.Running = true
|
||||
c.HostConfig = &containertypes.HostConfig{}
|
||||
@@ -68,7 +71,7 @@ func setupContainerWithName(t *testing.T, name string, daemon *Daemon) *containe
|
||||
func containerListContainsName(containers []*types.Container, name string) bool {
|
||||
for _, container := range containers {
|
||||
for _, containerName := range container.Names {
|
||||
if strings.TrimPrefix(containerName, "/") == name {
|
||||
if containerName == name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
@@ -110,16 +113,33 @@ func TestNameFilter(t *testing.T) {
|
||||
containerList, err := d.Containers(&types.ContainerListOptions{
|
||||
Filters: filters.NewArgs(filters.Arg("name", "^a")),
|
||||
})
|
||||
assert.Assert(t, err == nil)
|
||||
assert.NilError(t, err)
|
||||
assert.Assert(t, is.Len(containerList, 2))
|
||||
assert.Assert(t, containerListContainsName(containerList, one.Name))
|
||||
assert.Assert(t, containerListContainsName(containerList, two.Name))
|
||||
|
||||
// Same as above but with slash prefix should produce the same result
|
||||
containerListWithPrefix, err := d.Containers(&types.ContainerListOptions{
|
||||
Filters: filters.NewArgs(filters.Arg("name", "^/a")),
|
||||
})
|
||||
assert.NilError(t, err)
|
||||
assert.Assert(t, is.Len(containerListWithPrefix, 2))
|
||||
assert.Assert(t, containerListContainsName(containerListWithPrefix, one.Name))
|
||||
assert.Assert(t, containerListContainsName(containerListWithPrefix, two.Name))
|
||||
|
||||
// Same as above but make sure it works for exact names
|
||||
containerList, err = d.Containers(&types.ContainerListOptions{
|
||||
Filters: filters.NewArgs(filters.Arg("name", "b1")),
|
||||
})
|
||||
assert.Assert(t, err == nil)
|
||||
assert.NilError(t, err)
|
||||
assert.Assert(t, is.Len(containerList, 1))
|
||||
assert.Assert(t, containerListContainsName(containerList, three.Name))
|
||||
|
||||
// Same as above but with slash prefix should produce the same result
|
||||
containerListWithPrefix, err = d.Containers(&types.ContainerListOptions{
|
||||
Filters: filters.NewArgs(filters.Arg("name", "/b1")),
|
||||
})
|
||||
assert.NilError(t, err)
|
||||
assert.Assert(t, is.Len(containerListWithPrefix, 1))
|
||||
assert.Assert(t, containerListContainsName(containerListWithPrefix, three.Name))
|
||||
}
|
||||
|
||||
@@ -93,21 +93,12 @@ func (a *pluginAdapterWithRead) ReadLogs(config ReadConfig) *LogWatcher {
|
||||
|
||||
dec := logdriver.NewLogEntryDecoder(stream)
|
||||
for {
|
||||
select {
|
||||
case <-watcher.WatchClose():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
var buf logdriver.LogEntry
|
||||
if err := dec.Decode(&buf); err != nil {
|
||||
if err == io.EOF {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case watcher.Err <- errors.Wrap(err, "error decoding log message"):
|
||||
case <-watcher.WatchClose():
|
||||
}
|
||||
watcher.Err <- errors.Wrap(err, "error decoding log message")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -125,11 +116,10 @@ func (a *pluginAdapterWithRead) ReadLogs(config ReadConfig) *LogWatcher {
|
||||
return
|
||||
}
|
||||
|
||||
// send the message unless the consumer is gone
|
||||
select {
|
||||
case watcher.Msg <- msg:
|
||||
case <-watcher.WatchClose():
|
||||
// make sure the message we consumed is sent
|
||||
watcher.Msg <- msg
|
||||
case <-watcher.WatchConsumerGone():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -174,7 +174,7 @@ func TestAdapterReadLogs(t *testing.T) {
|
||||
t.Fatal("timeout waiting for message channel to close")
|
||||
|
||||
}
|
||||
lw.Close()
|
||||
lw.ProducerGone()
|
||||
|
||||
lw = lr.ReadLogs(ReadConfig{Follow: true})
|
||||
for _, x := range testMsg {
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
@@ -46,6 +47,10 @@ const (
|
||||
maximumLogEventsPerPut = 10000
|
||||
|
||||
// See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html
|
||||
// Because the events are interpreted as UTF-8 encoded Unicode, invalid UTF-8 byte sequences are replaced with the
|
||||
// Unicode replacement character (U+FFFD), which is a 3-byte sequence in UTF-8. To compensate for that and to avoid
|
||||
// splitting valid UTF-8 characters into invalid byte sequences, we calculate the length of each event assuming that
|
||||
// this replacement happens.
|
||||
maximumBytesPerEvent = 262144 - perEventBytes
|
||||
|
||||
resourceAlreadyExistsCode = "ResourceAlreadyExistsException"
|
||||
@@ -495,15 +500,16 @@ func (l *logStream) collectBatch(created chan bool) {
|
||||
}
|
||||
line := msg.Line
|
||||
if l.multilinePattern != nil {
|
||||
if l.multilinePattern.Match(line) || len(eventBuffer)+len(line) > maximumBytesPerEvent {
|
||||
lineEffectiveLen := effectiveLen(string(line))
|
||||
if l.multilinePattern.Match(line) || effectiveLen(string(eventBuffer))+lineEffectiveLen > maximumBytesPerEvent {
|
||||
// This is a new log event or we will exceed max bytes per event
|
||||
// so flush the current eventBuffer to events and reset timestamp
|
||||
l.processEvent(batch, eventBuffer, eventBufferTimestamp)
|
||||
eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond)
|
||||
eventBuffer = eventBuffer[:0]
|
||||
}
|
||||
// Append new line if event is less than max event size
|
||||
if len(line) < maximumBytesPerEvent {
|
||||
// Append newline if event is less than max event size
|
||||
if lineEffectiveLen < maximumBytesPerEvent {
|
||||
line = append(line, "\n"...)
|
||||
}
|
||||
eventBuffer = append(eventBuffer, line...)
|
||||
@@ -524,16 +530,17 @@ func (l *logStream) collectBatch(created chan bool) {
|
||||
// batch (defined in maximumBytesPerPut). Log messages are split by the maximum
|
||||
// bytes per event (defined in maximumBytesPerEvent). There is a fixed per-event
|
||||
// byte overhead (defined in perEventBytes) which is accounted for in split- and
|
||||
// batch-calculations.
|
||||
func (l *logStream) processEvent(batch *eventBatch, events []byte, timestamp int64) {
|
||||
for len(events) > 0 {
|
||||
// batch-calculations. Because the events are interpreted as UTF-8 encoded
|
||||
// Unicode, invalid UTF-8 byte sequences are replaced with the Unicode
|
||||
// replacement character (U+FFFD), which is a 3-byte sequence in UTF-8. To
|
||||
// compensate for that and to avoid splitting valid UTF-8 characters into
|
||||
// invalid byte sequences, we calculate the length of each event assuming that
|
||||
// this replacement happens.
|
||||
func (l *logStream) processEvent(batch *eventBatch, bytes []byte, timestamp int64) {
|
||||
for len(bytes) > 0 {
|
||||
// Split line length so it does not exceed the maximum
|
||||
lineBytes := len(events)
|
||||
if lineBytes > maximumBytesPerEvent {
|
||||
lineBytes = maximumBytesPerEvent
|
||||
}
|
||||
line := events[:lineBytes]
|
||||
|
||||
splitOffset, lineBytes := findValidSplit(string(bytes), maximumBytesPerEvent)
|
||||
line := bytes[:splitOffset]
|
||||
event := wrappedEvent{
|
||||
inputLogEvent: &cloudwatchlogs.InputLogEvent{
|
||||
Message: aws.String(string(line)),
|
||||
@@ -544,7 +551,7 @@ func (l *logStream) processEvent(batch *eventBatch, events []byte, timestamp int
|
||||
|
||||
added := batch.add(event, lineBytes)
|
||||
if added {
|
||||
events = events[lineBytes:]
|
||||
bytes = bytes[splitOffset:]
|
||||
} else {
|
||||
l.publishBatch(batch)
|
||||
batch.reset()
|
||||
@@ -552,6 +559,37 @@ func (l *logStream) processEvent(batch *eventBatch, events []byte, timestamp int
|
||||
}
|
||||
}
|
||||
|
||||
// effectiveLen counts the effective number of bytes in the string, after
// UTF-8 normalization. UTF-8 normalization includes replacing bytes that do
// not constitute valid UTF-8 encoded Unicode codepoints with the Unicode
// replacement codepoint U+FFFD (a 3-byte UTF-8 sequence, represented in Go as
// utf8.RuneError)
func effectiveLen(line string) int {
	effectiveBytes := 0
	for _, rune := range line {
		effectiveBytes += utf8.RuneLen(rune)
	}
	return effectiveBytes
}

// findValidSplit finds the byte offset to split a string without breaking valid
// Unicode codepoints given a maximum number of total bytes. findValidSplit
// returns the byte offset for splitting a string or []byte, as well as the
// effective number of bytes if the string were normalized to replace invalid
// UTF-8 encoded bytes with the Unicode replacement character (a 3-byte UTF-8
// sequence, represented in Go as utf8.RuneError)
func findValidSplit(line string, maxBytes int) (splitOffset, effectiveBytes int) {
	for offset, rune := range line {
		splitOffset = offset
		if effectiveBytes+utf8.RuneLen(rune) > maxBytes {
			return splitOffset, effectiveBytes
		}
		effectiveBytes += utf8.RuneLen(rune)
	}
	splitOffset = len(line)
	return
}
|
||||
|
||||
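A short standalone sketch of the byte accounting these helpers rely on; effectiveUTF8Len is a local copy of the effectiveLen logic above, and the printed values match what TestEffectiveLen asserts further down:

package main

import (
	"fmt"
	"unicode/utf8"
)

// effectiveUTF8Len mirrors effectiveLen: ranging over a Go string yields
// utf8.RuneError for each invalid byte, and that rune encodes to 3 bytes,
// so every invalid byte is counted as 3.
func effectiveUTF8Len(s string) int {
	n := 0
	for _, r := range s {
		n += utf8.RuneLen(r)
	}
	return n
}

func main() {
	fmt.Println(effectiveUTF8Len("Hello"))       // 5
	fmt.Println(effectiveUTF8Len("🙃"))          // 4: one 4-byte rune
	fmt.Println(effectiveUTF8Len("He\xff\xffo")) // 9: each 0xFF becomes U+FFFD (3 bytes)
}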
// publishBatch calls PutLogEvents for a given set of InputLogEvents,
|
||||
// accounting for sequencing requirements (each request must reference the
|
||||
// sequence token returned by the previous request).
|
||||
|
||||
@@ -938,6 +938,62 @@ func TestCollectBatchClose(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestEffectiveLen(t *testing.T) {
|
||||
tests := []struct {
|
||||
str string
|
||||
effectiveBytes int
|
||||
}{
|
||||
{"Hello", 5},
|
||||
{string([]byte{1, 2, 3, 4}), 4},
|
||||
{"🙃", 4},
|
||||
{string([]byte{0xFF, 0xFF, 0xFF, 0xFF}), 12},
|
||||
{"He\xff\xffo", 9},
|
||||
{"", 0},
|
||||
}
|
||||
for i, tc := range tests {
|
||||
t.Run(fmt.Sprintf("%d/%s", i, tc.str), func(t *testing.T) {
|
||||
assert.Equal(t, tc.effectiveBytes, effectiveLen(tc.str))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindValidSplit(t *testing.T) {
|
||||
tests := []struct {
|
||||
str string
|
||||
maxEffectiveBytes int
|
||||
splitOffset int
|
||||
effectiveBytes int
|
||||
}{
|
||||
{"", 10, 0, 0},
|
||||
{"Hello", 6, 5, 5},
|
||||
{"Hello", 2, 2, 2},
|
||||
{"Hello", 0, 0, 0},
|
||||
{"🙃", 3, 0, 0},
|
||||
{"🙃", 4, 4, 4},
|
||||
{string([]byte{'a', 0xFF}), 2, 1, 1},
|
||||
{string([]byte{'a', 0xFF}), 4, 2, 4},
|
||||
}
|
||||
for i, tc := range tests {
|
||||
t.Run(fmt.Sprintf("%d/%s", i, tc.str), func(t *testing.T) {
|
||||
splitOffset, effectiveBytes := findValidSplit(tc.str, tc.maxEffectiveBytes)
|
||||
assert.Equal(t, tc.splitOffset, splitOffset, "splitOffset")
|
||||
assert.Equal(t, tc.effectiveBytes, effectiveBytes, "effectiveBytes")
|
||||
t.Log(tc.str[:tc.splitOffset])
|
||||
t.Log(tc.str[tc.splitOffset:])
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessEventEmoji(t *testing.T) {
|
||||
stream := &logStream{}
|
||||
batch := &eventBatch{}
|
||||
bytes := []byte(strings.Repeat("🙃", maximumBytesPerEvent/4+1))
|
||||
stream.processEvent(batch, bytes, 0)
|
||||
assert.Equal(t, 2, len(batch.batch), "should be two events in the batch")
|
||||
assert.Equal(t, strings.Repeat("🙃", maximumBytesPerEvent/4), aws.StringValue(batch.batch[0].inputLogEvent.Message))
|
||||
assert.Equal(t, "🙃", aws.StringValue(batch.batch[1].inputLogEvent.Message))
|
||||
}
|
||||
|
||||
func TestCollectBatchLineSplit(t *testing.T) {
|
||||
mockClient := newMockClient()
|
||||
stream := &logStream{
|
||||
@@ -987,6 +1043,55 @@ func TestCollectBatchLineSplit(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestCollectBatchLineSplitWithBinary(t *testing.T) {
|
||||
mockClient := newMockClient()
|
||||
stream := &logStream{
|
||||
client: mockClient,
|
||||
logGroupName: groupName,
|
||||
logStreamName: streamName,
|
||||
sequenceToken: aws.String(sequenceToken),
|
||||
messages: make(chan *logger.Message),
|
||||
}
|
||||
mockClient.putLogEventsResult <- &putLogEventsResult{
|
||||
successResult: &cloudwatchlogs.PutLogEventsOutput{
|
||||
NextSequenceToken: aws.String(nextSequenceToken),
|
||||
},
|
||||
}
|
||||
var ticks = make(chan time.Time)
|
||||
newTicker = func(_ time.Duration) *time.Ticker {
|
||||
return &time.Ticker{
|
||||
C: ticks,
|
||||
}
|
||||
}
|
||||
|
||||
d := make(chan bool)
|
||||
close(d)
|
||||
go stream.collectBatch(d)
|
||||
|
||||
longline := strings.Repeat("\xFF", maximumBytesPerEvent/3) // 0xFF is counted as the 3-byte utf8.RuneError
|
||||
stream.Log(&logger.Message{
|
||||
Line: []byte(longline + "\xFD"),
|
||||
Timestamp: time.Time{},
|
||||
})
|
||||
|
||||
// no ticks
|
||||
stream.Close()
|
||||
|
||||
argument := <-mockClient.putLogEventsArgument
|
||||
if argument == nil {
|
||||
t.Fatal("Expected non-nil PutLogEventsInput")
|
||||
}
|
||||
if len(argument.LogEvents) != 2 {
|
||||
t.Errorf("Expected LogEvents to contain 2 elements, but contains %d", len(argument.LogEvents))
|
||||
}
|
||||
if *argument.LogEvents[0].Message != longline {
|
||||
t.Errorf("Expected message to be %s but was %s", longline, *argument.LogEvents[0].Message)
|
||||
}
|
||||
if *argument.LogEvents[1].Message != "\xFD" {
|
||||
t.Errorf("Expected message to be %s but was %s", "\xFD", *argument.LogEvents[1].Message)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCollectBatchMaxEvents(t *testing.T) {
|
||||
mockClient := newMockClientBuffered(1)
|
||||
stream := &logStream{
|
||||
@@ -1125,6 +1230,83 @@ func TestCollectBatchMaxTotalBytes(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestCollectBatchMaxTotalBytesWithBinary(t *testing.T) {
|
||||
expectedPuts := 2
|
||||
mockClient := newMockClientBuffered(expectedPuts)
|
||||
stream := &logStream{
|
||||
client: mockClient,
|
||||
logGroupName: groupName,
|
||||
logStreamName: streamName,
|
||||
sequenceToken: aws.String(sequenceToken),
|
||||
messages: make(chan *logger.Message),
|
||||
}
|
||||
for i := 0; i < expectedPuts; i++ {
|
||||
mockClient.putLogEventsResult <- &putLogEventsResult{
|
||||
successResult: &cloudwatchlogs.PutLogEventsOutput{
|
||||
NextSequenceToken: aws.String(nextSequenceToken),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
var ticks = make(chan time.Time)
|
||||
newTicker = func(_ time.Duration) *time.Ticker {
|
||||
return &time.Ticker{
|
||||
C: ticks,
|
||||
}
|
||||
}
|
||||
|
||||
d := make(chan bool)
|
||||
close(d)
|
||||
go stream.collectBatch(d)
|
||||
|
||||
// maxline is the maximum line that could be submitted after
|
||||
// accounting for its overhead.
|
||||
maxline := strings.Repeat("\xFF", (maximumBytesPerPut-perEventBytes)/3) // 0xFF is counted as the 3-byte utf8.RuneError
|
||||
// This will be split and batched up to the `maximumBytesPerPut'
|
||||
// (+/- `maximumBytesPerEvent'). This /should/ be aligned, but
|
||||
// should also tolerate an offset within that range.
|
||||
stream.Log(&logger.Message{
|
||||
Line: []byte(maxline),
|
||||
Timestamp: time.Time{},
|
||||
})
|
||||
stream.Log(&logger.Message{
|
||||
Line: []byte("B"),
|
||||
Timestamp: time.Time{},
|
||||
})
|
||||
|
||||
// no ticks, guarantee batch by size (and chan close)
|
||||
stream.Close()
|
||||
|
||||
argument := <-mockClient.putLogEventsArgument
|
||||
if argument == nil {
|
||||
t.Fatal("Expected non-nil PutLogEventsInput")
|
||||
}
|
||||
|
||||
// Should total to the maximum allowed bytes.
|
||||
eventBytes := 0
|
||||
for _, event := range argument.LogEvents {
|
||||
eventBytes += effectiveLen(*event.Message)
|
||||
}
|
||||
eventsOverhead := len(argument.LogEvents) * perEventBytes
|
||||
payloadTotal := eventBytes + eventsOverhead
|
||||
// lowestMaxBatch allows the payload to be offset if the messages
|
||||
// don't lend themselves to align with the maximum event size.
|
||||
lowestMaxBatch := maximumBytesPerPut - maximumBytesPerEvent
|
||||
|
||||
if payloadTotal > maximumBytesPerPut {
|
||||
t.Errorf("Expected <= %d bytes but was %d", maximumBytesPerPut, payloadTotal)
|
||||
}
|
||||
if payloadTotal < lowestMaxBatch {
|
||||
t.Errorf("Batch to be no less than %d but was %d", lowestMaxBatch, payloadTotal)
|
||||
}
|
||||
|
||||
argument = <-mockClient.putLogEventsArgument
|
||||
message := *argument.LogEvents[len(argument.LogEvents)-1].Message
|
||||
if message[len(message)-1:] != "B" {
|
||||
t.Errorf("Expected message to be %s but was %s", "B", message[len(message)-1:])
|
||||
}
|
||||
}
|
||||
|
||||
func TestCollectBatchWithDuplicateTimestamps(t *testing.T) {
|
||||
mockClient := newMockClient()
|
||||
stream := &logStream{
|
||||
|
||||
@@ -165,7 +165,7 @@ func (s *journald) Close() error {
|
||||
s.mu.Lock()
|
||||
s.closed = true
|
||||
for reader := range s.readers.readers {
|
||||
reader.Close()
|
||||
reader.ProducerGone()
|
||||
}
|
||||
s.mu.Unlock()
|
||||
return nil
|
||||
@@ -299,7 +299,7 @@ func (s *journald) followJournal(logWatcher *logger.LogWatcher, j *C.sd_journal,
|
||||
// Wait until we're told to stop.
|
||||
select {
|
||||
case cursor = <-newCursor:
|
||||
case <-logWatcher.WatchClose():
|
||||
case <-logWatcher.WatchConsumerGone():
|
||||
// Notify the other goroutine that its work is done.
|
||||
C.close(pfd[1])
|
||||
cursor = <-newCursor
|
||||
|
||||
@@ -50,7 +50,7 @@ func New(info logger.Info) (logger.Logger, error) {
|
||||
return nil, err
|
||||
}
|
||||
if capval <= 0 {
|
||||
return nil, fmt.Errorf("max-size should be a positive numbler")
|
||||
return nil, fmt.Errorf("max-size must be a positive number")
|
||||
}
|
||||
}
|
||||
var maxFiles = 1
|
||||
@@ -166,13 +166,14 @@ func ValidateLogOpt(cfg map[string]string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes underlying file and signals all readers to stop.
|
||||
// Close closes underlying file and signals all the readers
|
||||
// that the logs producer is gone.
|
||||
func (l *JSONFileLogger) Close() error {
|
||||
l.mu.Lock()
|
||||
l.closed = true
|
||||
err := l.writer.Close()
|
||||
for r := range l.readers {
|
||||
r.Close()
|
||||
r.ProducerGone()
|
||||
delete(l.readers, r)
|
||||
}
|
||||
l.mu.Unlock()
|
||||
|
||||
@@ -50,11 +50,10 @@ func BenchmarkJSONFileLoggerReadLogs(b *testing.B) {
|
||||
}()
|
||||
|
||||
lw := jsonlogger.(*JSONFileLogger).ReadLogs(logger.ReadConfig{Follow: true})
|
||||
watchClose := lw.WatchClose()
|
||||
for {
|
||||
select {
|
||||
case <-lw.Msg:
|
||||
case <-watchClose:
|
||||
case <-lw.WatchProducerGone():
|
||||
return
|
||||
case err := <-chError:
|
||||
if err != nil {
|
||||
|
||||
@@ -166,7 +166,7 @@ func (d *driver) Close() error {
|
||||
d.closed = true
|
||||
err := d.logfile.Close()
|
||||
for r := range d.readers {
|
||||
r.Close()
|
||||
r.ProducerGone()
|
||||
delete(d.readers, r)
|
||||
}
|
||||
d.mu.Unlock()
|
||||
|
||||
@@ -104,33 +104,50 @@ type LogWatcher struct {
|
||||
// For sending log messages to a reader.
|
||||
Msg chan *Message
|
||||
// For sending error messages that occur while reading logs.
|
||||
Err chan error
|
||||
closeOnce sync.Once
|
||||
closeNotifier chan struct{}
|
||||
Err chan error
|
||||
producerOnce sync.Once
|
||||
producerGone chan struct{}
|
||||
consumerOnce sync.Once
|
||||
consumerGone chan struct{}
|
||||
}
|
||||
|
||||
// NewLogWatcher returns a new LogWatcher.
|
||||
func NewLogWatcher() *LogWatcher {
|
||||
return &LogWatcher{
|
||||
Msg: make(chan *Message, logWatcherBufferSize),
|
||||
Err: make(chan error, 1),
|
||||
closeNotifier: make(chan struct{}),
|
||||
Msg: make(chan *Message, logWatcherBufferSize),
|
||||
Err: make(chan error, 1),
|
||||
producerGone: make(chan struct{}),
|
||||
consumerGone: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// Close notifies the underlying log reader to stop.
|
||||
func (w *LogWatcher) Close() {
|
||||
// ProducerGone notifies the underlying log reader that
|
||||
// the logs producer (a container) is gone.
|
||||
func (w *LogWatcher) ProducerGone() {
|
||||
// only close if not already closed
|
||||
w.closeOnce.Do(func() {
|
||||
close(w.closeNotifier)
|
||||
w.producerOnce.Do(func() {
|
||||
close(w.producerGone)
|
||||
})
|
||||
}
|
||||
|
||||
// WatchClose returns a channel receiver that receives notification
|
||||
// when the watcher has been closed. This should only be called from
|
||||
// one goroutine.
|
||||
func (w *LogWatcher) WatchClose() <-chan struct{} {
|
||||
return w.closeNotifier
|
||||
// WatchProducerGone returns a channel receiver that receives notification
|
||||
// once the logs producer (a container) is gone.
|
||||
func (w *LogWatcher) WatchProducerGone() <-chan struct{} {
|
||||
return w.producerGone
|
||||
}
|
||||
|
||||
// ConsumerGone notifies that the logs consumer is gone.
|
||||
func (w *LogWatcher) ConsumerGone() {
|
||||
// only close if not already closed
|
||||
w.consumerOnce.Do(func() {
|
||||
close(w.consumerGone)
|
||||
})
|
||||
}
|
||||
|
||||
// WatchConsumerGone returns a channel receiver that receives notification
|
||||
// when the log watcher consumer is gone.
|
||||
func (w *LogWatcher) WatchConsumerGone() <-chan struct{} {
|
||||
return w.consumerGone
|
||||
}
|
||||
|
||||
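For context, a minimal standalone sketch of the producer/consumer signalling pattern introduced here: instead of a single Close/WatchClose pair, each side gets its own close-once channel, so a reader can distinguish "container stopped" from "log consumer went away". The type and methods below are illustrative stand-ins, not the logger package's API:

package main

import (
	"fmt"
	"sync"
)

type watcher struct {
	producerOnce sync.Once
	producerGone chan struct{}
	consumerOnce sync.Once
	consumerGone chan struct{}
}

func newWatcher() *watcher {
	return &watcher{
		producerGone: make(chan struct{}),
		consumerGone: make(chan struct{}),
	}
}

func (w *watcher) ProducerGone() { w.producerOnce.Do(func() { close(w.producerGone) }) }
func (w *watcher) ConsumerGone() { w.consumerOnce.Do(func() { close(w.consumerGone) }) }

func main() {
	w := newWatcher()
	w.ProducerGone()
	w.ProducerGone() // safe to call twice; sync.Once guards the close

	select {
	case <-w.producerGone:
		fmt.Println("producer gone: drain remaining messages, then stop")
	case <-w.consumerGone:
		fmt.Println("consumer gone: stop immediately")
	}
}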
// Capability defines the list of capabilities that a driver can implement
|
||||
|
||||
@@ -488,7 +488,7 @@ func tailFiles(files []SizeReaderAt, watcher *logger.LogWatcher, createDecoder m
|
||||
go func() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-watcher.WatchClose():
|
||||
case <-watcher.WatchConsumerGone():
|
||||
cancel()
|
||||
}
|
||||
}()
|
||||
@@ -546,22 +546,9 @@ func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan int
|
||||
}
|
||||
defer func() {
|
||||
f.Close()
|
||||
fileWatcher.Remove(name)
|
||||
fileWatcher.Close()
|
||||
}()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
go func() {
|
||||
select {
|
||||
case <-logWatcher.WatchClose():
|
||||
fileWatcher.Remove(name)
|
||||
cancel()
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
var retries int
|
||||
handleRotate := func() error {
|
||||
f.Close()
|
||||
@@ -596,7 +583,9 @@ func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan int
|
||||
case fsnotify.Rename, fsnotify.Remove:
|
||||
select {
|
||||
case <-notifyRotate:
|
||||
case <-ctx.Done():
|
||||
case <-logWatcher.WatchProducerGone():
|
||||
return errDone
|
||||
case <-logWatcher.WatchConsumerGone():
|
||||
return errDone
|
||||
}
|
||||
if err := handleRotate(); err != nil {
|
||||
@@ -618,7 +607,9 @@ func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan int
|
||||
return errRetry
|
||||
}
|
||||
return err
|
||||
case <-ctx.Done():
|
||||
case <-logWatcher.WatchProducerGone():
|
||||
return errDone
|
||||
case <-logWatcher.WatchConsumerGone():
|
||||
return errDone
|
||||
}
|
||||
}
|
||||
@@ -664,23 +655,11 @@ func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan int
|
||||
if !until.IsZero() && msg.Timestamp.After(until) {
|
||||
return
|
||||
}
|
||||
// send the message, unless the consumer is gone
|
||||
select {
|
||||
case logWatcher.Msg <- msg:
|
||||
case <-ctx.Done():
|
||||
logWatcher.Msg <- msg
|
||||
for {
|
||||
msg, err := decodeLogLine()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if !since.IsZero() && msg.Timestamp.Before(since) {
|
||||
continue
|
||||
}
|
||||
if !until.IsZero() && msg.Timestamp.After(until) {
|
||||
return
|
||||
}
|
||||
logWatcher.Msg <- msg
|
||||
}
|
||||
case <-logWatcher.WatchConsumerGone():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
"bufio"
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -74,3 +76,128 @@ func TestTailFiles(t *testing.T) {
|
||||
assert.Assert(t, string(msg.Line) == "Where we're going we don't need roads.", string(msg.Line))
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowLogsConsumerGone(t *testing.T) {
|
||||
lw := logger.NewLogWatcher()
|
||||
|
||||
f, err := ioutil.TempFile("", t.Name())
|
||||
assert.NilError(t, err)
|
||||
defer func() {
|
||||
f.Close()
|
||||
os.Remove(f.Name())
|
||||
}()
|
||||
|
||||
makeDecoder := func(rdr io.Reader) func() (*logger.Message, error) {
|
||||
return func() (*logger.Message, error) {
|
||||
return &logger.Message{}, nil
|
||||
}
|
||||
}
|
||||
|
||||
followLogsDone := make(chan struct{})
|
||||
var since, until time.Time
|
||||
go func() {
|
||||
followLogs(f, lw, make(chan interface{}), makeDecoder, since, until)
|
||||
close(followLogsDone)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-lw.Msg:
|
||||
case err := <-lw.Err:
|
||||
assert.NilError(t, err)
|
||||
case <-followLogsDone:
|
||||
t.Fatal("follow logs finished unexpectedly")
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Fatal("timeout waiting for log message")
|
||||
}
|
||||
|
||||
lw.ConsumerGone()
|
||||
select {
|
||||
case <-followLogsDone:
|
||||
case <-time.After(20 * time.Second):
|
||||
t.Fatal("timeout waiting for followLogs() to finish")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowLogsProducerGone(t *testing.T) {
|
||||
lw := logger.NewLogWatcher()
|
||||
|
||||
f, err := ioutil.TempFile("", t.Name())
|
||||
assert.NilError(t, err)
|
||||
defer os.Remove(f.Name())
|
||||
|
||||
var sent, received, closed int
|
||||
makeDecoder := func(rdr io.Reader) func() (*logger.Message, error) {
|
||||
return func() (*logger.Message, error) {
|
||||
if closed == 1 {
|
||||
closed++
|
||||
t.Logf("logDecode() closed after sending %d messages\n", sent)
|
||||
return nil, io.EOF
|
||||
} else if closed > 1 {
|
||||
t.Fatal("logDecode() called after closing!")
|
||||
return nil, io.EOF
|
||||
}
|
||||
sent++
|
||||
return &logger.Message{}, nil
|
||||
}
|
||||
}
|
||||
var since, until time.Time
|
||||
|
||||
followLogsDone := make(chan struct{})
|
||||
go func() {
|
||||
followLogs(f, lw, make(chan interface{}), makeDecoder, since, until)
|
||||
close(followLogsDone)
|
||||
}()
|
||||
|
||||
// read 1 message
|
||||
select {
|
||||
case <-lw.Msg:
|
||||
received++
|
||||
case err := <-lw.Err:
|
||||
assert.NilError(t, err)
|
||||
case <-followLogsDone:
|
||||
t.Fatal("followLogs() finished unexpectedly")
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Fatal("timeout waiting for log message")
|
||||
}
|
||||
|
||||
// "stop" the "container"
|
||||
closed = 1
|
||||
lw.ProducerGone()
|
||||
|
||||
// should receive all the messages sent
|
||||
readDone := make(chan struct{})
|
||||
go func() {
|
||||
defer close(readDone)
|
||||
for {
|
||||
select {
|
||||
case <-lw.Msg:
|
||||
received++
|
||||
if received == sent {
|
||||
return
|
||||
}
|
||||
case err := <-lw.Err:
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-readDone:
|
||||
case <-time.After(30 * time.Second):
|
||||
t.Fatalf("timeout waiting for log messages to be read (sent: %d, received: %d", sent, received)
|
||||
}
|
||||
|
||||
t.Logf("messages sent: %d, received: %d", sent, received)
|
||||
|
||||
// followLogs() should be done by now
|
||||
select {
|
||||
case <-followLogsDone:
|
||||
case <-time.After(30 * time.Second):
|
||||
t.Fatal("timeout waiting for followLogs() to finish")
|
||||
}
|
||||
|
||||
select {
|
||||
case <-lw.WatchConsumerGone():
|
||||
t.Fatal("consumer should not have exited")
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
@@ -110,14 +110,16 @@ func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, c
|
||||
}
|
||||
}()
|
||||
}
|
||||
// set up some defers
|
||||
defer logs.Close()
|
||||
// signal that the log reader is gone
|
||||
defer logs.ConsumerGone()
|
||||
|
||||
// close the messages channel. closing is the only way to signal above
|
||||
// that we're doing with logs (other than context cancel i guess).
|
||||
defer close(messageChan)
|
||||
|
||||
lg.Debug("begin logs")
|
||||
defer lg.Debugf("end logs (%v)", ctx.Err())
|
||||
|
||||
for {
|
||||
select {
|
||||
// i do not believe as the system is currently designed any error
|
||||
@@ -132,14 +134,12 @@ func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, c
|
||||
}
|
||||
return
|
||||
case <-ctx.Done():
|
||||
lg.Debugf("logs: end stream, ctx is done: %v", ctx.Err())
|
||||
return
|
||||
case msg, ok := <-logs.Msg:
|
||||
// there is some kind of pool or ring buffer in the logger that
|
||||
// produces these messages, and a possible future optimization
|
||||
// might be to use that pool and reuse message objects
|
||||
if !ok {
|
||||
lg.Debug("end logs")
|
||||
return
|
||||
}
|
||||
m := msg.AsLogMessage() // just a pointer conversion, does not copy data
|
||||
|
||||
@@ -45,6 +45,7 @@ func (daemon *Daemon) Reload(conf *config.Config) (err error) {
|
||||
daemon.reloadDebug(conf, attributes)
|
||||
daemon.reloadMaxConcurrentDownloadsAndUploads(conf, attributes)
|
||||
daemon.reloadShutdownTimeout(conf, attributes)
|
||||
daemon.reloadFeatures(conf, attributes)
|
||||
|
||||
if err := daemon.reloadClusterDiscovery(conf, attributes); err != nil {
|
||||
return err
|
||||
@@ -322,3 +323,13 @@ func (daemon *Daemon) reloadNetworkDiagnosticPort(conf *config.Config, attribute
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// reloadFeatures updates configuration with enabled/disabled features
|
||||
func (daemon *Daemon) reloadFeatures(conf *config.Config, attributes map[string]string) {
|
||||
// update corresponding configuration
|
||||
// note that we allow features option to be entirely unset
|
||||
daemon.configStore.Features = conf.Features
|
||||
|
||||
// prepare reload event attributes with updatable configurations
|
||||
attributes["features"] = fmt.Sprintf("%v", daemon.configStore.Features)
|
||||
}
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
// TODO: this should use more of libtrust.LoadOrCreateTrustKey which may need
|
||||
// a refactor or this function to be moved into libtrust
|
||||
func loadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
|
||||
err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700, "")
|
||||
err := system.MkdirAll(filepath.Dir(trustKeyPath), 0755, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -210,6 +210,8 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo
|
||||
mp.Name = v.Name
|
||||
mp.Driver = v.Driver
|
||||
|
||||
// need to selinux-relabel local mounts
|
||||
mp.Source = v.Mountpoint
|
||||
if mp.Driver == volume.DefaultDriverName {
|
||||
setBindModeIfNull(mp)
|
||||
}
|
||||
|
||||
@@ -10,8 +10,6 @@ const (
|
||||
Version = "library-import"
|
||||
BuildTime = "library-import"
|
||||
IAmStatic = "library-import"
|
||||
ContainerdCommitID = "library-import"
|
||||
RuncCommitID = "library-import"
|
||||
InitCommitID = "library-import"
|
||||
PlatformName = ""
|
||||
ProductName = ""
|
||||
|
||||
BIN
git-bundles/CVE-2019-5736.bundle
Normal file
Binary file not shown.
997
hack/ci/windows.ps1
Normal file
@@ -0,0 +1,997 @@
|
||||
# WARNING WARNING WARNING - DO NOT EDIT THIS FILE IN JENKINS DIRECTLY.
|
||||
# SUBMIT A PR TO https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/executeCI.ps1,
|
||||
# AND MAKE SURE https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1
|
||||
# ISN'T BROKEN!!!!!!! VALIDATE USING A TEST CONTEXT IN JENKINS. THEN COPY/PASTE INTO JENKINS PRODUCTION.
|
||||
#
|
||||
# Jenkins CI scripts for Windows to Windows CI (Powershell Version)
|
||||
# By John Howard (@jhowardmsft) January 2016 - bash version; July 2016 Ported to PowerShell
|
||||
|
||||
$ErrorActionPreference = 'Stop'
|
||||
$StartTime=Get-Date
|
||||
|
||||
# Put up top to be blindingly obvious. The production jenkins.dockerproject.org Linux-container
|
||||
# CI job is "Docker-PRs-LoW-RS3". Force into LCOW mode for this run, or not.
|
||||
if ($env:BUILD_TAG -match "-LoW") { $env:LCOW_MODE=1 }
|
||||
if ($env:BUILD_TAG -match "-WoW") { $env:LCOW_MODE="" }
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------------------------
|
||||
# When executed, we rely on four variables being set in the environment:
|
||||
#
|
||||
# [The reason for being environment variables rather than parameters is historical. No reason
|
||||
# why it couldn't be updated.]
|
||||
#
|
||||
# SOURCES_DRIVE is the drive on which the sources being tested are cloned from.
|
||||
# This should be a straight drive letter, no platform semantics.
|
||||
# For example 'c'
|
||||
#
|
||||
# SOURCES_SUBDIR is the top level directory under SOURCES_DRIVE where the
|
||||
# sources are cloned to. There are no platform semantics in this
|
||||
# as it does not include slashes.
|
||||
# For example 'gopath'
|
||||
#
|
||||
# Based on the above examples, it would be expected that Jenkins
|
||||
# would clone the sources being tested to
|
||||
# SOURCES_DRIVE\SOURCES_SUBDIR\src\github.com\docker\docker, or
|
||||
# c:\gopath\src\github.com\docker\docker
|
||||
#
|
||||
# TESTRUN_DRIVE is the drive where we build the binary on and redirect everything
|
||||
# to for the daemon under test. On an Azure D2 type host which has
|
||||
# an SSD temporary storage D: drive, this is ideal for performance.
|
||||
# For example 'd'
|
||||
#
|
||||
# TESTRUN_SUBDIR is the top level directory under TESTRUN_DRIVE where we redirect
|
||||
# everything to for the daemon under test. For example 'CI'.
|
||||
# Hence, the daemon under test is run under
|
||||
# TESTRUN_DRIVE\TESTRUN_SUBDIR\CI-<CommitID> or
|
||||
# d:\CI\CI-<CommitID>
|
||||
#
|
||||
# In addition, the following variables can control the run configuration:
|
||||
#
|
||||
# DOCKER_DUT_DEBUG if defined starts the daemon under test in debug mode.
|
||||
#
|
||||
# SKIP_VALIDATION_TESTS if defined skips the validation tests
|
||||
#
|
||||
# SKIP_UNIT_TESTS if defined skips the unit tests
|
||||
#
|
||||
# SKIP_INTEGRATION_TESTS if defined skips the integration tests
|
||||
#
|
||||
# SKIP_COPY_GO if defined skips copying the go installer from the image
|
||||
#
|
||||
# DOCKER_DUT_HYPERV if defined, daemon under test default isolation is hyperv
|
||||
#
|
||||
# INTEGRATION_TEST_NAME to only run partial tests eg "TestInfo*" will only run
|
||||
# any tests starting "TestInfo"
|
||||
#
|
||||
# SKIP_BINARY_BUILD if defined skips building the binary
|
||||
#
|
||||
# SKIP_ZAP_DUT if defined doesn't zap the daemon under test directory
|
||||
#
|
||||
# SKIP_IMAGE_BUILD if defined doesn't build the 'docker' image
|
||||
#
|
||||
# INTEGRATION_IN_CONTAINER if defined, runs the integration tests from inside a container.
|
||||
# As of July 2016, there are known issues with this.
|
||||
#
|
||||
# SKIP_ALL_CLEANUP if defined, skips any cleanup at the start or end of the run
|
||||
#
|
||||
# WINDOWS_BASE_IMAGE if defined, uses that as the base image. Note that the
|
||||
# docker integration tests are also coded to use the same
|
||||
# environment variable, and if not set, defaults to microsoft/windowsservercore
|
||||
#
|
||||
# LCOW_BASIC_MODE if defined, does very basic LCOW verification. Ultimately we
|
||||
# want to run the entire CI suite from docker, but that's a way off.
|
||||
#
|
||||
# LCOW_MODE if defined, runs the entire CI suite
|
||||
#
|
||||
# -------------------------------------------------------------------------------------------
|
||||
#
|
||||
# Jenkins Integration. Add a Windows Powershell build step as follows:
|
||||
#
|
||||
# Write-Host -ForegroundColor green "INFO: Jenkins build step starting"
|
||||
# $CISCRIPT_DEFAULT_LOCATION = "https://raw.githubusercontent.com/jhowardmsft/docker-w2wCIScripts/master/runCI/executeCI.ps1"
|
||||
# $CISCRIPT_LOCAL_LOCATION = "$env:TEMP\executeCI.ps1"
|
||||
# Write-Host -ForegroundColor green "INFO: Removing cached execution script"
|
||||
# Remove-Item $CISCRIPT_LOCAL_LOCATION -Force -ErrorAction SilentlyContinue 2>&1 | Out-Null
|
||||
# $wc = New-Object net.webclient
|
||||
# try {
|
||||
# Write-Host -ForegroundColor green "INFO: Downloading latest execution script..."
|
||||
# $wc.Downloadfile($CISCRIPT_DEFAULT_LOCATION, $CISCRIPT_LOCAL_LOCATION)
|
||||
# }
|
||||
# catch [System.Net.WebException]
|
||||
# {
|
||||
# Throw ("Failed to download: $_")
|
||||
# }
|
||||
# & $CISCRIPT_LOCAL_LOCATION
|
||||
# -------------------------------------------------------------------------------------------
|
||||
|
||||
|
||||
$SCRIPT_VER="28-Aug-2018 09:33 PDT"
|
||||
$FinallyColour="Cyan"
|
||||
|
||||
#$env:DOCKER_DUT_DEBUG="yes" # Comment out to not be in debug mode
|
||||
#$env:SKIP_UNIT_TESTS="yes"
|
||||
#$env:SKIP_VALIDATION_TESTS="yes"
|
||||
#$env:SKIP_ZAP_DUT=""
|
||||
#$env:SKIP_BINARY_BUILD="yes"
|
||||
#$env:INTEGRATION_TEST_NAME=""
|
||||
#$env:SKIP_IMAGE_BUILD="yes"
|
||||
#$env:SKIP_ALL_CLEANUP="yes"
|
||||
#$env:INTEGRATION_IN_CONTAINER="yes"
|
||||
#$env:WINDOWS_BASE_IMAGE=""
|
||||
#$env:SKIP_COPY_GO="yes"
|
||||
|
||||
Function Nuke-Everything {
|
||||
$ErrorActionPreference = 'SilentlyContinue'
|
||||
|
||||
try {
|
||||
|
||||
if ($env:SKIP_ALL_CLEANUP -eq $null) {
|
||||
Write-Host -ForegroundColor green "INFO: Nuke-Everything..."
|
||||
$containerCount = ($(docker ps -aq | Measure-Object -line).Lines)
|
||||
if (-not $LastExitCode -eq 0) {
|
||||
Throw "ERROR: Failed to get container count from control daemon while nuking"
|
||||
}
|
||||
|
||||
Write-Host -ForegroundColor green "INFO: Container count on control daemon to delete is $containerCount"
|
||||
if ($(docker ps -aq | Measure-Object -line).Lines -gt 0) {
|
||||
docker rm -f $(docker ps -aq)
|
||||
}
|
||||
$imageCount=($(docker images --format "{{.Repository}}:{{.ID}}" | `
|
||||
select-string -NotMatch "windowsservercore" | `
|
||||
select-string -NotMatch "nanoserver" | `
|
||||
select-string -NotMatch "docker" | `
|
||||
Measure-Object -line).Lines)
|
||||
if ($imageCount -gt 0) {
|
||||
Write-Host -Foregroundcolor green "INFO: Non-base image count on control daemon to delete is $imageCount"
|
||||
docker rmi -f `
|
||||
$(docker images --format "{{.Repository}}:{{.ID}}" | `
|
||||
select-string -NotMatch "windowsservercore" | `
|
||||
select-string -NotMatch "nanoserver" | `
|
||||
select-string -NotMatch "docker").ToString().Split(":")[1]
|
||||
}
|
||||
} else {
|
||||
Write-Host -ForegroundColor Magenta "WARN: Skipping cleanup of images and containers"
|
||||
}
|
||||
|
||||
# Kill any spurious daemons. The '-' is IMPORTANT otherwise will kill the control daemon!
|
||||
$pids=$(get-process | where-object {$_.ProcessName -like 'dockerd-*'}).id
|
||||
foreach ($p in $pids) {
|
||||
Write-Host "INFO: Killing daemon with PID $p"
|
||||
Stop-Process -Id $p -Force -ErrorAction SilentlyContinue
|
||||
}
|
||||
|
||||
if ($pidFile -ne $Null) {
|
||||
Write-Host "INFO: Tidying pidfile $pidfile"
|
||||
if (Test-Path $pidFile) {
|
||||
$p=Get-Content $pidFile -raw
|
||||
if ($p -ne $null){
|
||||
Write-Host -ForegroundColor green "INFO: Stopping possible daemon pid $p"
|
||||
taskkill -f -t -pid $p
|
||||
}
|
||||
Remove-Item "$env:TEMP\docker.pid" -force -ErrorAction SilentlyContinue
|
||||
}
|
||||
}
|
||||
|
||||
Stop-Process -name "cc1" -Force -ErrorAction SilentlyContinue 2>&1 | Out-Null
|
||||
Stop-Process -name "link" -Force -ErrorAction SilentlyContinue 2>&1 | Out-Null
|
||||
Stop-Process -name "compile" -Force -ErrorAction SilentlyContinue 2>&1 | Out-Null
|
||||
Stop-Process -name "ld" -Force -ErrorAction SilentlyContinue 2>&1 | Out-Null
|
||||
Stop-Process -name "go" -Force -ErrorAction SilentlyContinue 2>&1 | Out-Null
|
||||
Stop-Process -name "git" -Force -ErrorAction SilentlyContinue 2>&1 | Out-Null
|
||||
Stop-Process -name "git-remote-https" -Force -ErrorAction SilentlyContinue 2>&1 | Out-Null
|
||||
Stop-Process -name "integration-cli.test" -Force -ErrorAction SilentlyContinue 2>&1 | Out-Null
|
||||
Stop-Process -name "tail" -Force -ErrorAction SilentlyContinue 2>&1 | Out-Null
|
||||
|
||||
# Detach any VHDs
|
||||
gwmi msvm_mountedstorageimage -namespace root/virtualization/v2 -ErrorAction SilentlyContinue | foreach-object {$_.DetachVirtualHardDisk() }
|
||||
|
||||
# Stop any compute processes
|
||||
Get-ComputeProcess | Stop-ComputeProcess -Force
|
||||
|
||||
# Delete the directory using our dangerous utility unless told not to
|
||||
if (Test-Path "$env:TESTRUN_DRIVE`:\$env:TESTRUN_SUBDIR") {
|
||||
if (($env:SKIP_ZAP_DUT -ne $null) -or ($env:SKIP_ALL_CLEANUP -eq $null)) {
|
||||
Write-Host -ForegroundColor Green "INFO: Nuking $env:TESTRUN_DRIVE`:\$env:TESTRUN_SUBDIR"
|
||||
docker-ci-zap "-folder=$env:TESTRUN_DRIVE`:\$env:TESTRUN_SUBDIR"
|
||||
} else {
|
||||
Write-Host -ForegroundColor Magenta "WARN: Skip nuking $env:TESTRUN_DRIVE`:\$env:TESTRUN_SUBDIR"
|
||||
}
|
||||
}
|
||||
|
||||
# TODO: This should be able to be removed in August 2017 update. Only needed for RS1 Production Server workaround - Psched
|
||||
$reg = "HKLM:\System\CurrentControlSet\Services\Psched\Parameters\NdisAdapters"
|
||||
$count=(Get-ChildItem $reg | Measure-Object).Count
|
||||
if ($count -gt 0) {
|
||||
Write-Warning "There are $count NdisAdapters leaked under Psched\Parameters"
|
||||
if ($env:COMPUTERNAME -match "jenkins-rs1-") {
|
||||
Write-Warning "Cleaning Psched..."
|
||||
Get-ChildItem $reg | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue | Out-Null
|
||||
} else {
|
||||
Write-Warning "Not cleaning as not a production RS1 server"
|
||||
}
|
||||
}
|
||||
|
||||
# TODO: This should be able to be removed in August 2017 update. Only needed for RS1
|
||||
$reg = "HKLM:\System\CurrentControlSet\Services\WFPLWFS\Parameters\NdisAdapters"
|
||||
$count=(Get-ChildItem $reg | Measure-Object).Count
|
||||
if ($count -gt 0) {
|
||||
Write-Warning "There are $count NdisAdapters leaked under WFPLWFS\Parameters"
|
||||
if ($env:COMPUTERNAME -match "jenkins-rs1-") {
|
||||
Write-Warning "Cleaning WFPLWFS..."
|
||||
Get-ChildItem $reg | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue | Out-Null
|
||||
} else {
|
||||
Write-Warning "Not cleaning as not a production RS1 server"
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
# Don't throw any errors onwards Throw $_
|
||||
}
|
||||
}
|
||||
|
||||
Try {
|
||||
Write-Host -ForegroundColor Cyan "`nINFO: executeCI.ps1 starting at $(date)`n"
|
||||
Write-Host -ForegroundColor Green "INFO: Script version $SCRIPT_VER"
|
||||
Set-PSDebug -Trace 0 # 1 to turn on
|
||||
$origPath="$env:PATH" # so we can restore it at the end
|
||||
$origDOCKER_HOST="$DOCKER_HOST" # So we can restore it at the end
|
||||
$origGOROOT="$env:GOROOT" # So we can restore it at the end
|
||||
$origGOPATH="$env:GOPATH" # So we can restore it at the end
|
||||
|
||||
# Turn off progress bars
|
||||
$origProgressPreference=$global:ProgressPreference
|
||||
$global:ProgressPreference='SilentlyContinue'
|
||||
|
||||
# Git version
|
||||
Write-Host -ForegroundColor Green "INFO: Running $(git version)"
|
||||
|
||||
# OS Version
|
||||
$bl=(Get-ItemProperty -Path "Registry::HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion" -Name BuildLabEx).BuildLabEx
|
||||
$a=$bl.ToString().Split(".")
|
||||
$Branch=$a[3]
|
||||
$WindowsBuild=$a[0]+"."+$a[1]+"."+$a[4]
|
||||
Write-Host -ForegroundColor green "INFO: Branch:$Branch Build:$WindowsBuild"
|
||||
|
||||
# List the environment variables
|
||||
Write-Host -ForegroundColor green "INFO: Environment variables:"
|
||||
Get-ChildItem Env: | Out-String
|
||||
|
||||
# PR
|
||||
if (-not ($env:PR -eq $Null)) { echo "INFO: PR#$env:PR (https://github.com/docker/docker/pull/$env:PR)" }
|
||||
|
||||
# Make sure docker is installed
|
||||
if ((Get-Command "docker" -ErrorAction SilentlyContinue) -eq $null) { Throw "ERROR: docker is not installed or not found on path" }
|
||||
|
||||
# Make sure docker-ci-zap is installed
|
||||
if ((Get-Command "docker-ci-zap" -ErrorAction SilentlyContinue) -eq $null) { Throw "ERROR: docker-ci-zap is not installed or not found on path" }
|
||||
|
||||
# Make sure SOURCES_DRIVE is set
|
||||
if ($env:SOURCES_DRIVE -eq $Null) { Throw "ERROR: Environment variable SOURCES_DRIVE is not set" }
|
||||
|
||||
# Make sure TESTRUN_DRIVE is set
|
||||
if ($env:TESTRUN_DRIVE -eq $Null) { Throw "ERROR: Environment variable TESTRUN_DRIVE is not set" }
|
||||
|
||||
# Make sure SOURCES_SUBDIR is set
|
||||
if ($env:SOURCES_SUBDIR -eq $Null) { Throw "ERROR: Environment variable SOURCES_SUBDIR is not set" }
|
||||
|
||||
# Make sure TESTRUN_SUBDIR is set
|
||||
if ($env:TESTRUN_SUBDIR -eq $Null) { Throw "ERROR: Environment variable TESTRUN_SUBDIR is not set" }
|
||||
|
||||
# SOURCES_DRIVE\SOURCES_SUBDIR must be a directory and exist
|
||||
if (-not (Test-Path -PathType Container "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR")) { Throw "ERROR: $env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR must be an existing directory" }
|
||||
|
||||
# Create the TESTRUN_DRIVE\TESTRUN_SUBDIR if it does not already exist
|
||||
New-Item -ItemType Directory -Force -Path "$env:TESTRUN_DRIVE`:\$env:TESTRUN_SUBDIR" -ErrorAction SilentlyContinue | Out-Null
|
||||
|
||||
Write-Host -ForegroundColor Green "INFO: Sources under $env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\..."
|
||||
Write-Host -ForegroundColor Green "INFO: Test run under $env:TESTRUN_DRIVE`:\$env:TESTRUN_SUBDIR\..."
|
||||
|
||||
# Check the intended source location is a directory
|
||||
if (-not (Test-Path -PathType Container "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker" -ErrorAction SilentlyContinue)) {
|
||||
Throw "ERROR: $env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker is not a directory!"
|
||||
}
|
||||
|
||||
# Make sure we start at the root of the sources
|
||||
cd "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker"
|
||||
Write-Host -ForegroundColor Green "INFO: Running in $(pwd)"
|
||||
|
||||
# Make sure we are in repo
|
||||
if (-not (Test-Path -PathType Leaf -Path ".\Dockerfile.windows")) {
|
||||
Throw "$(pwd) does not contain Dockerfile.windows!"
|
||||
}
|
||||
Write-Host -ForegroundColor Green "INFO: docker/docker repository was found"
|
||||
|
||||
# Make sure microsoft/windowsservercore:latest image is installed in the control daemon. On public CI machines, windowsservercore.tar and nanoserver.tar
|
||||
# are pre-baked and tagged appropriately in the c:\baseimages directory, and can be directly loaded.
|
||||
# Note - this script will only work on 10B (Oct 2016) or later machines! Not 9D or previous due to image tagging assumptions.
|
||||
#
|
||||
# On machines not on Microsoft corpnet, or those which have not been pre-baked, we have to docker pull the image, in which case it
|
||||
# will come in directly as microsoft/windowsservercore:latest. The ultimate goal of all this code is to ensure that, whatever happens,
|
||||
# we have microsoft/windowsservercore:latest
|
||||
#
|
||||
# Note we cannot use (as at Oct 2016) nanoserver as the control daemon's base image, even if nanoserver is used in the tests themselves.
|
||||
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
$ControlDaemonBaseImage="windowsservercore"
|
||||
|
||||
$readBaseFrom="c"
|
||||
if ($((docker images --format "{{.Repository}}:{{.Tag}}" | Select-String $("microsoft/"+$ControlDaemonBaseImage+":latest") | Measure-Object -Line).Lines) -eq 0) {
|
||||
# Try the internal Azure CI servers or Microsoft internal corpnet, where the base image is already pre-prepared on the disk,
|
||||
# either through Invoke-DockerCI or, in the case of Azure CI servers, baked into the VHD at the same location.
|
||||
if (Test-Path $("$env:SOURCES_DRIVE`:\baseimages\"+$ControlDaemonBaseImage+".tar")) {
|
||||
|
||||
# An optimisation for CI servers: copy it to the test run drive (typically D:, an SSD).
|
||||
if ($env:SOURCES_DRIVE -ne $env:TESTRUN_DRIVE) {
|
||||
$readBaseFrom=$env:TESTRUN_DRIVE
|
||||
if (!(Test-Path "$env:TESTRUN_DRIVE`:\baseimages")) {
|
||||
New-Item "$env:TESTRUN_DRIVE`:\baseimages" -type directory | Out-Null
|
||||
}
|
||||
if (!(Test-Path "$env:TESTRUN_DRIVE`:\baseimages\windowsservercore.tar")) {
|
||||
if (Test-Path "$env:SOURCES_DRIVE`:\baseimages\windowsservercore.tar") {
|
||||
Write-Host -ForegroundColor Green "INFO: Optimisation - copying $env:SOURCES_DRIVE`:\baseimages\windowsservercore.tar to $env:TESTRUN_DRIVE`:\baseimages"
|
||||
Copy-Item "$env:SOURCES_DRIVE`:\baseimages\windowsservercore.tar" "$env:TESTRUN_DRIVE`:\baseimages"
|
||||
}
|
||||
}
|
||||
if (!(Test-Path "$env:TESTRUN_DRIVE`:\baseimages\nanoserver.tar")) {
|
||||
if (Test-Path "$env:SOURCES_DRIVE`:\baseimages\nanoserver.tar") {
|
||||
Write-Host -ForegroundColor Green "INFO: Optimisation - copying $env:SOURCES_DRIVE`:\baseimages\nanoserver.tar to $env:TESTRUN_DRIVE`:\baseimages"
|
||||
Copy-Item "$env:SOURCES_DRIVE`:\baseimages\nanoserver.tar" "$env:TESTRUN_DRIVE`:\baseimages"
|
||||
}
|
||||
}
|
||||
$readBaseFrom=$env:TESTRUN_DRIVE
|
||||
}
|
||||
|
||||
Write-Host -ForegroundColor Green "INFO: Loading"$ControlDaemonBaseImage".tar from disk. This may take some time..."
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
docker load -i $("$readBaseFrom`:\baseimages\"+$ControlDaemonBaseImage+".tar")
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not $LastExitCode -eq 0) {
|
||||
Throw $("ERROR: Failed to load $readBaseFrom`:\baseimages\"+$ControlDaemonBaseImage+".tar")
|
||||
}
|
||||
Write-Host -ForegroundColor Green "INFO: docker load of"$ControlDaemonBaseImage" completed successfully"
|
||||
} else {
|
||||
# We need to docker pull it instead. It will come in directly as microsoft/imagename:latest
|
||||
Write-Host -ForegroundColor Green $("INFO: Pulling microsoft/"+$ControlDaemonBaseImage+":latest from docker hub. This may take some time...")
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
docker pull $("microsoft/"+$ControlDaemonBaseImage)
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not $LastExitCode -eq 0) {
|
||||
Throw $("ERROR: Failed to docker pull microsoft/"+$ControlDaemonBaseImage+":latest.")
|
||||
}
|
||||
Write-Host -ForegroundColor Green $("INFO: docker pull of microsoft/"+$ControlDaemonBaseImage+":latest completed successfully")
|
||||
}
|
||||
} else {
|
||||
Write-Host -ForegroundColor Green "INFO: Image"$("microsoft/"+$ControlDaemonBaseImage+":latest")"is already loaded in the control daemon"
|
||||
}
|
||||
|
||||
# Inspect the pulled image to get the version directly
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
$imgVersion = $(docker inspect $("microsoft/"+$ControlDaemonBaseImage) --format "{{.OsVersion}}")
|
||||
$ErrorActionPreference = "Stop"
|
||||
Write-Host -ForegroundColor Green $("INFO: Version of microsoft/"+$ControlDaemonBaseImage+":latest is '"+$imgVersion+"'")
|
||||
|
||||
# Provide the docker version for debugging purposes.
|
||||
Write-Host -ForegroundColor Green "INFO: Docker version of control daemon"
|
||||
Write-Host
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
docker version
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Write-Host
|
||||
Write-Host -ForegroundColor Green "---------------------------------------------------------------------------"
|
||||
Write-Host -ForegroundColor Green " Failed to get a response from the control daemon. It may be down."
|
||||
Write-Host -ForegroundColor Green " Try re-running this CI job, or ask on #docker-maintainers on docker slack"
|
||||
Write-Host -ForegroundColor Green " to see if the the daemon is running. Also check the service configuration."
|
||||
Write-Host -ForegroundColor Green " DOCKER_HOST is set to $DOCKER_HOST."
|
||||
Write-Host -ForegroundColor Green "---------------------------------------------------------------------------"
|
||||
Write-Host
|
||||
Throw "ERROR: The control daemon does not appear to be running."
|
||||
}
|
||||
Write-Host
|
||||
|
||||
# Same as above, but docker info
|
||||
Write-Host -ForegroundColor Green "INFO: Docker info of control daemon"
|
||||
Write-Host
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
docker info
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: The control daemon does not appear to be running."
|
||||
}
|
||||
Write-Host
|
||||
|
||||
# Get the commit hash and verify we have something
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
$COMMITHASH=$(git rev-parse --short HEAD)
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: Failed to get commit hash. Are you sure this is a docker repository?"
|
||||
}
|
||||
Write-Host -ForegroundColor Green "INFO: Commit hash is $COMMITHASH"
|
||||
|
||||
# Nuke everything and go back to our sources after
|
||||
Nuke-Everything
|
||||
cd "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker"
|
||||
|
||||
# Redirect to a temporary location.
|
||||
$TEMPORIG=$env:TEMP
|
||||
$env:TEMP="$env:TESTRUN_DRIVE`:\$env:TESTRUN_SUBDIR\CI-$COMMITHASH"
|
||||
$env:LOCALAPPDATA="$TEMP\localappdata"
|
||||
$errorActionPreference='Stop'
|
||||
New-Item -ItemType Directory "$env:TEMP" -ErrorAction SilentlyContinue | Out-Null
|
||||
New-Item -ItemType Directory "$env:TEMP\userprofile" -ErrorAction SilentlyContinue | Out-Null
|
||||
New-Item -ItemType Directory "$env:TEMP\localappdata" -ErrorAction SilentlyContinue | Out-Null
|
||||
New-Item -ItemType Directory "$env:TEMP\binary" -ErrorAction SilentlyContinue | Out-Null
|
||||
New-Item -ItemType Directory "$env:TEMP\installer" -ErrorAction SilentlyContinue | Out-Null
|
||||
if ($env:SKIP_COPY_GO -eq $null) {
|
||||
# Wipe the previous version of GO - we're going to get it out of the image
|
||||
if (Test-Path "$env:TEMP\go") { Remove-Item "$env:TEMP\go" -Recurse -Force -ErrorAction SilentlyContinue | Out-Null }
|
||||
New-Item -ItemType Directory "$env:TEMP\go" -ErrorAction SilentlyContinue | Out-Null
|
||||
}
|
||||
|
||||
Write-Host -ForegroundColor Green "INFO: Location for testing is $env:TEMP"
|
||||
|
||||
# CI Integrity check - ensure Dockerfile.windows and Dockerfile go versions match
|
||||
$goVersionDockerfileWindows=$(Get-Content ".\Dockerfile.windows" | Select-String "^ENV GO_VERSION").ToString().Replace("ENV GO_VERSION=","").Replace("\","").Replace("``","").Trim()
|
||||
$goVersionDockerfile=$(Get-Content ".\Dockerfile" | Select-String "^ENV GO_VERSION")
|
||||
|
||||
# As of go 1.11, Dockerfile changed to be in the format like "FROM golang:1.11.0 AS base".
|
||||
# If a version number ends with .0 (as in 1.11.0, a convention used in golang docker
|
||||
# image versions), it needs to be removed (i.e. "1.11.0" becomes "1.11").
|
||||
if ($goVersionDockerfile -eq $Null) {
|
||||
$goVersionDockerfile=$(Get-Content ".\Dockerfile" | Select-String "^FROM golang:")
|
||||
if ($goVersionDockerfile -ne $Null) {
|
||||
$goVersionDockerfile = $goVersionDockerfile.ToString().Split(" ")[1].Split(":")[1] -replace '\.0$',''
|
||||
}
|
||||
} else {
|
||||
$goVersionDockerfile = $goVersionDockerfile.ToString().Split(" ")[2]
|
||||
}
|
||||
if ($goVersionDockerfile -eq $Null) {
|
||||
Throw "ERROR: Failed to extract golang version from Dockerfile"
|
||||
}
|
||||
Write-Host -ForegroundColor Green "INFO: Validating GOLang consistency in Dockerfile.windows..."
|
||||
if (-not ($goVersionDockerfile -eq $goVersionDockerfileWindows)) {
|
||||
Throw "ERROR: Mismatched GO versions between Dockerfile and Dockerfile.windows. Update your PR to ensure that both files are updated and in sync. $goVersionDockerfile $goVersionDockerfileWindows"
|
||||
}
|
||||
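# A minimal sketch (hypothetical version numbers) of what the check above extracts and compares:
#   Dockerfile.windows  "ENV GO_VERSION=1.11.5"       -> "1.11.5"
#   Dockerfile          "FROM golang:1.11.5 AS base"  -> "1.11.5"  (matches)
#   Dockerfile          "FROM golang:1.11.0 AS base"  -> "1.11"    (trailing ".0" stripped, so
#                        Dockerfile.windows would need "ENV GO_VERSION=1.11" to match)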
|
||||
# Build the image
|
||||
if ($env:SKIP_IMAGE_BUILD -eq $null) {
|
||||
Write-Host -ForegroundColor Cyan "`n`nINFO: Building the image from Dockerfile.windows at $(Get-Date)..."
|
||||
Write-Host
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
$Duration=$(Measure-Command { docker build -t docker -f Dockerfile.windows . | Out-Host })
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: Failed to build image from Dockerfile.windows"
|
||||
}
|
||||
Write-Host -ForegroundColor Green "INFO: Image build ended at $(Get-Date). Duration`:$Duration"
|
||||
} else {
|
||||
Write-Host -ForegroundColor Magenta "WARN: Skipping building the docker image"
|
||||
}
|
||||
|
||||
# The following path must currently be docker\docker, as dictated by Dockerfile.windows
|
||||
$contPath="$COMMITHASH`:c`:\go\src\github.com\docker\docker\bundles"
|
||||
|
||||
# After https://github.com/docker/docker/pull/30290, .git was added to .dockerignore. Therefore
|
||||
# we have to calculate the -unsupported suffix outside of the container, and pass the commit ID in through
|
||||
# an environment variable for the binary build
|
||||
$CommitUnsupported=""
|
||||
if ($(git status --porcelain --untracked-files=no).Length -ne 0) {
|
||||
$CommitUnsupported="-unsupported"
|
||||
}
|
||||
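# Illustration (hypothetical hash): with uncommitted changes present, a short commit hash of
# abc1234 is later passed into the build as DOCKER_GITCOMMIT=abc1234-unsupported; with a clean
# tree it is passed through unchanged as DOCKER_GITCOMMIT=abc1234.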
|
||||
# Build the binary in a container unless asked to skip it.
|
||||
if ($env:SKIP_BINARY_BUILD -eq $null) {
|
||||
Write-Host -ForegroundColor Cyan "`n`nINFO: Building the test binaries at $(Get-Date)..."
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
docker rm -f $COMMITHASH 2>&1 | Out-Null
|
||||
if ($CommitUnsupported -ne "") {
|
||||
Write-Host ""
|
||||
Write-Warning "This version is unsupported because there are uncommitted file(s)."
|
||||
Write-Warning "Either commit these changes, or add them to .gitignore."
|
||||
git status --porcelain --untracked-files=no | Write-Warning
|
||||
Write-Host ""
|
||||
}
|
||||
$Duration=$(Measure-Command {docker run --name $COMMITHASH -e DOCKER_GITCOMMIT=$COMMITHASH$CommitUnsupported docker hack\make.ps1 -Daemon -Client | Out-Host })
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: Failed to build binary"
|
||||
}
|
||||
Write-Host -ForegroundColor Green "INFO: Binaries build ended at $(Get-Date). Duration`:$Duration"
|
||||
|
||||
# Copy the binaries and the generated version_autogen.go out of the container
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
docker cp "$contPath\docker.exe" $env:TEMP\binary\
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: Failed to docker cp the client binary (docker.exe) to $env:TEMP\binary"
|
||||
}
|
||||
docker cp "$contPath\dockerd.exe" $env:TEMP\binary\
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: Failed to docker cp the daemon binary (dockerd.exe) to $env:TEMP\binary"
|
||||
}
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
# Copy the built dockerd.exe to dockerd-$COMMITHASH.exe so that it is easily spotted in Task Manager.
|
||||
Write-Host -ForegroundColor Green "INFO: Copying the built daemon binary to $env:TEMP\binary\dockerd-$COMMITHASH.exe..."
|
||||
Copy-Item $env:TEMP\binary\dockerd.exe $env:TEMP\binary\dockerd-$COMMITHASH.exe -Force -ErrorAction SilentlyContinue
|
||||
|
||||
# Copy the built docker.exe to docker-$COMMITHASH.exe
|
||||
Write-Host -ForegroundColor Green "INFO: Copying the built client binary to $env:TEMP\binary\docker-$COMMITHASH.exe..."
|
||||
Copy-Item $env:TEMP\binary\docker.exe $env:TEMP\binary\docker-$COMMITHASH.exe -Force -ErrorAction SilentlyContinue
|
||||
|
||||
} else {
|
||||
Write-Host -ForegroundColor Magenta "WARN: Skipping building the binaries"
|
||||
}
|
||||
|
||||
Write-Host -ForegroundColor Green "INFO: Copying dockerversion from the container..."
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
docker cp "$contPath\..\dockerversion\version_autogen.go" "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker\dockerversion"
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: Failed to docker cp the generated version_autogen.go to $env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker\dockerversion"
|
||||
}
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
# Grab the Go installer out of the built image. That way, once it is extracted and the paths are set, we know we are consistent,
|
||||
# so there's no need to re-deploy on account of an upgrade to the Go version being used in docker.
|
||||
if ($env:SKIP_COPY_GO -eq $null) {
|
||||
Write-Host -ForegroundColor Green "INFO: Copying the golang package from the container to $env:TEMP\installer\go.zip..."
|
||||
docker cp "$COMMITHASH`:c`:\go.zip" $env:TEMP\installer\
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: Failed to docker cp the golang installer 'go.zip' from container:c:\go.zip to $env:TEMP\installer"
|
||||
}
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
# Extract the golang installer
|
||||
Write-Host -ForegroundColor Green "INFO: Extracting go.zip to $env:TEMP\go"
|
||||
$Duration=$(Measure-Command { Expand-Archive $env:TEMP\installer\go.zip $env:TEMP -Force | Out-Null})
|
||||
Write-Host -ForegroundColor Green "INFO: Extraction ended at $(Get-Date). Duration`:$Duration"
|
||||
} else {
|
||||
Write-Host -ForegroundColor Magenta "WARN: Skipping copying and extracting golang from the image"
|
||||
}
|
||||
|
||||
# Set the GOPATH
|
||||
Write-Host -ForegroundColor Green "INFO: Updating the golang and path environment variables"
|
||||
$env:GOPATH="$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR"
|
||||
Write-Host -ForegroundColor Green "INFO: GOPATH=$env:GOPATH"
|
||||
|
||||
# Set the path to have the version of go from the image at the front
|
||||
$env:PATH="$env:TEMP\go\bin;$env:PATH"
|
||||
|
||||
# Set the GOROOT to be our copy of go from the image
|
||||
$env:GOROOT="$env:TEMP\go"
|
||||
Write-Host -ForegroundColor Green "INFO: $(go version)"
|
||||
|
||||
# Work out the -H parameters for the daemon under test (DASHH_DUT) and the client under test (DASHH_CUT)
|
||||
#$DASHH_DUT="npipe:////./pipe/$COMMITHASH" # Can't do remote named pipe
|
||||
#$ip = (resolve-dnsname $env:COMPUTERNAME -type A -NoHostsFile -LlmnrNetbiosOnly).IPAddress # Useful to tie down
|
||||
$DASHH_CUT="tcp://127.0.0.1`:2357" # Not a typo for 2375!
|
||||
$DASHH_DUT="tcp://0.0.0.0:2357" # Not a typo for 2375!
|
||||
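# Presumably port 2357 (rather than the default 2375) is used so that the daemon under test
# can never clash with the endpoint of the control daemon.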
|
||||
# Arguments for the daemon under test
|
||||
$dutArgs=@()
|
||||
$dutArgs += "-H $DASHH_DUT"
|
||||
$dutArgs += "--data-root $env:TEMP\daemon"
|
||||
$dutArgs += "--pidfile $env:TEMP\docker.pid"
|
||||
|
||||
# Save the PID file so we can nuke it if set
|
||||
$pidFile="$env:TEMP\docker.pid"
|
||||
|
||||
# Arguments: Are we starting the daemon under test in debug mode?
|
||||
if (-not ("$env:DOCKER_DUT_DEBUG" -eq "")) {
|
||||
Write-Host -ForegroundColor Green "INFO: Running the daemon under test in debug mode"
|
||||
$dutArgs += "-D"
|
||||
}
|
||||
|
||||
# Arguments: Are we starting the daemon under test with Hyper-V containers as the default isolation?
|
||||
if (-not ("$env:DOCKER_DUT_HYPERV" -eq "")) {
|
||||
Write-Host -ForegroundColor Green "INFO: Running the daemon under test with Hyper-V containers as the default"
|
||||
$dutArgs += "--exec-opt isolation=hyperv"
|
||||
}
|
||||
|
||||
# Start the daemon under test, ensuring everything is redirected to folders under $TEMP.
|
||||
# Important - we launch the -$COMMITHASH version so that we can kill it without
|
||||
# killing the control daemon.
|
||||
Write-Host -ForegroundColor Green "INFO: Starting a daemon under test..."
|
||||
Write-Host -ForegroundColor Green "INFO: Args: $dutArgs"
|
||||
New-Item -ItemType Directory $env:TEMP\daemon -ErrorAction SilentlyContinue | Out-Null
|
||||
|
||||
# In LCOW mode, for now we need to set an environment variable before starting the daemon under test
|
||||
if (($env:LCOW_MODE -ne $Null) -or ($env:LCOW_BASIC_MODE -ne $Null)) {
|
||||
$env:LCOW_SUPPORTED=1
|
||||
}
|
||||
|
||||
# Cannot fathom why, but the daemon under test always writes to stderr...
|
||||
Start-Process "$env:TEMP\binary\dockerd-$COMMITHASH" `
|
||||
-ArgumentList $dutArgs `
|
||||
-RedirectStandardOutput "$env:TEMP\dut.out" `
|
||||
-RedirectStandardError "$env:TEMP\dut.err"
|
||||
Write-Host -ForegroundColor Green "INFO: Process started successfully."
|
||||
$daemonStarted=1
|
||||
|
||||
# In LCOW mode, turn off that variable
|
||||
if (($env:LCOW_MODE -ne $Null) -or ($env:LCOW_BASIC_MODE -ne $Null)) {
|
||||
$env:LCOW_SUPPORTED=""
|
||||
}
|
||||
|
||||
|
||||
# Start tailing the daemon under test if the command is installed
|
||||
if ((Get-Command "tail" -ErrorAction SilentlyContinue) -ne $null) {
|
||||
$tail = start-process "tail" -ArgumentList "-f $env:TEMP\dut.out" -ErrorAction SilentlyContinue
|
||||
}
|
||||
|
||||
# Verify we can get the daemon under test to respond
|
||||
$tries=20
|
||||
Write-Host -ForegroundColor Green "INFO: Waiting for the daemon under test to start..."
|
||||
while ($true) {
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" version 2>&1 | Out-Null
|
||||
$ErrorActionPreference = "Stop"
|
||||
if ($LastExitCode -eq 0) {
|
||||
break
|
||||
}
|
||||
|
||||
$tries--
|
||||
if ($tries -le 0) {
|
||||
$DumpDaemonLog=1
|
||||
Throw "ERROR: Failed to get a response from the daemon under test"
|
||||
}
|
||||
Write-Host -NoNewline "."
|
||||
sleep 1
|
||||
}
|
||||
Write-Host -ForegroundColor Green "INFO: Daemon under test started and replied!"
|
||||
|
||||
# Provide the docker version of the daemon under test for debugging purposes.
|
||||
Write-Host -ForegroundColor Green "INFO: Docker version of the daemon under test"
|
||||
Write-Host
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" version
|
||||
$ErrorActionPreference = "Stop"
|
||||
if ($LastExitCode -ne 0) {
|
||||
Throw "ERROR: The daemon under test does not appear to be running."
|
||||
Throw "ERROR: The daemon under test does not appear to be running."
|
||||
}
|
||||
Write-Host
|
||||
|
||||
# Same as above but docker info
|
||||
Write-Host -ForegroundColor Green "INFO: Docker info of the daemon under test"
|
||||
Write-Host
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" info
|
||||
$ErrorActionPreference = "Stop"
|
||||
if ($LastExitCode -ne 0) {
|
||||
Throw "ERROR: The daemon under test does not appear to be running."
|
||||
Throw "ERROR: The daemon under test does not appear to be running."
|
||||
}
|
||||
Write-Host
|
||||
|
||||
# Same as above but docker images
|
||||
Write-Host -ForegroundColor Green "INFO: Docker images of the daemon under test"
|
||||
Write-Host
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" images
|
||||
$ErrorActionPreference = "Stop"
|
||||
if ($LastExitCode -ne 0) {
|
||||
Throw "ERROR: The daemon under test does not appear to be running."
|
||||
Throw "ERROR: The daemon under test does not appear to be running."
|
||||
}
|
||||
Write-Host
|
||||
|
||||
# Don't need Windows images when in LCOW mode.
|
||||
if (($env:LCOW_MODE -eq $Null) -and ($env:LCOW_BASIC_MODE -eq $Null)) {
|
||||
|
||||
# Default to windowsservercore for the base image used for the tests. The "docker" image
|
||||
# and the control daemon use microsoft/windowsservercore regardless. This is *JUST* for the tests.
|
||||
if ($env:WINDOWS_BASE_IMAGE -eq $Null) {
|
||||
$env:WINDOWS_BASE_IMAGE="microsoft/windowsservercore"
|
||||
}
|
||||
|
||||
# Lowercase and make sure it has a microsoft/ prefix
|
||||
$env:WINDOWS_BASE_IMAGE = $env:WINDOWS_BASE_IMAGE.ToLower()
|
||||
if ($($env:WINDOWS_BASE_IMAGE -Split "/")[0] -ne "microsoft") {
|
||||
Throw "ERROR: WINDOWS_BASE_IMAGE should start microsoft/"
|
||||
}
|
||||
|
||||
Write-Host -ForegroundColor Green "INFO: Base image for tests is $env:WINDOWS_BASE_IMAGE"
|
||||
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
if ($((& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" images --format "{{.Repository}}:{{.Tag}}" | Select-String $($env:WINDOWS_BASE_IMAGE+":latest") | Measure-Object -Line).Lines) -eq 0) {
|
||||
# Try the internal Azure CI servers or Microsoft internal corpnet, where the base image is already pre-prepared on the disk,
|
||||
# either through Invoke-DockerCI or, in the case of Azure CI servers, baked into the VHD at the same location.
|
||||
if (Test-Path $("c:\baseimages\"+$($env:WINDOWS_BASE_IMAGE -Split "/")[1]+".tar")) {
|
||||
Write-Host -ForegroundColor Green "INFO: Loading"$($env:WINDOWS_BASE_IMAGE -Split "/")[1]".tar from disk into the daemon under test. This may take some time..."
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" load -i $("$readBaseFrom`:\baseimages\"+$($env:WINDOWS_BASE_IMAGE -Split "/")[1]+".tar")
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not $LastExitCode -eq 0) {
|
||||
Throw $("ERROR: Failed to load $readBaseFrom`:\baseimages\"+$($env:WINDOWS_BASE_IMAGE -Split "/")[1]+".tar into daemon under test")
|
||||
}
|
||||
Write-Host -ForegroundColor Green "INFO: docker load of"$($env:WINDOWS_BASE_IMAGE -Split "/")[1]" into daemon under test completed successfully"
|
||||
} else {
|
||||
# We need to docker pull it instead. It will come in directly as microsoft/imagename:latest
|
||||
Write-Host -ForegroundColor Green $("INFO: Pulling "+$env:WINDOWS_BASE_IMAGE+":latest from docker hub into daemon under test. This may take some time...")
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" pull $($env:WINDOWS_BASE_IMAGE)
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not $LastExitCode -eq 0) {
|
||||
Throw $("ERROR: Failed to docker pull "+$env:WINDOWS_BASE_IMAGE+":latest into daemon under test.")
|
||||
}
|
||||
Write-Host -ForegroundColor Green $("INFO: docker pull of "+$env:WINDOWS_BASE_IMAGE+":latest into daemon under test completed successfully")
|
||||
}
|
||||
} else {
|
||||
Write-Host -ForegroundColor Green "INFO: Image"$($env:WINDOWS_BASE_IMAGE+":latest")"is already loaded in the daemon under test"
|
||||
}
|
||||
|
||||
|
||||
# Inspect the pulled or loaded image to get the version directly
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
$dutimgVersion = $(&"$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" inspect $($env:WINDOWS_BASE_IMAGE) --format "{{.OsVersion}}")
|
||||
$ErrorActionPreference = "Stop"
|
||||
Write-Host -ForegroundColor Green $("INFO: Version of "+$env:WINDOWS_BASE_IMAGE+":latest is '"+$dutimgVersion+"'")
|
||||
}
|
||||
|
||||
# Run the validation tests unless SKIP_VALIDATION_TESTS is defined.
|
||||
if ($env:SKIP_VALIDATION_TESTS -eq $null) {
|
||||
Write-Host -ForegroundColor Cyan "INFO: Running validation tests at $(Get-Date)..."
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
$Duration=$(Measure-Command { hack\make.ps1 -DCO -GoFormat -PkgImports | Out-Host })
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: Validation tests failed"
|
||||
}
|
||||
Write-Host -ForegroundColor Green "INFO: Validation tests ended at $(Get-Date). Duration`:$Duration"
|
||||
} else {
|
||||
Write-Host -ForegroundColor Magenta "WARN: Skipping validation tests"
|
||||
}
|
||||
|
||||
# Note the unit tests won't work in LCOW mode as I turned off loading the base images above.
|
||||
# Run the unit tests inside a container unless SKIP_UNIT_TESTS is defined
|
||||
if (($env:LCOW_MODE -eq $Null) -and ($env:LCOW_BASIC_MODE -eq $Null)) {
|
||||
if ($env:SKIP_UNIT_TESTS -eq $null) {
|
||||
Write-Host -ForegroundColor Cyan "INFO: Running unit tests at $(Get-Date)..."
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
$Duration=$(Measure-Command {docker run -e DOCKER_GITCOMMIT=$COMMITHASH$CommitUnsupported docker hack\make.ps1 -TestUnit | Out-Host })
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: Unit tests failed"
|
||||
}
|
||||
Write-Host -ForegroundColor Green "INFO: Unit tests ended at $(Get-Date). Duration`:$Duration"
|
||||
} else {
|
||||
Write-Host -ForegroundColor Magenta "WARN: Skipping unit tests"
|
||||
}
|
||||
}
|
||||
|
||||
# Add the Windows busybox image. Needed for WCOW integration tests
|
||||
if (($env:LCOW_MODE -eq $Null) -and ($env:LCOW_BASIC_MODE -eq $Null)) {
|
||||
if ($env:SKIP_INTEGRATION_TESTS -eq $null) {
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
# Build it regardless while switching between nanoserver and windowsservercore
|
||||
#$bbCount = $(& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" images | Select-String "busybox" | Measure-Object -line).Lines
|
||||
#$ErrorActionPreference = "Stop"
|
||||
#if (-not($LastExitCode -eq 0)) {
|
||||
# Throw "ERROR: Could not determine if busybox image is present"
|
||||
#}
|
||||
#if ($bbCount -eq 0) {
|
||||
Write-Host -ForegroundColor Green "INFO: Building busybox"
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
|
||||
# This is a temporary hack for nanoserver
|
||||
if ($env:WINDOWS_BASE_IMAGE -ne "microsoft/windowsservercore") {
|
||||
Write-Host -ForegroundColor Red "HACK HACK HACK - Building 64-bit nanoserver busybox image"
|
||||
$(& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" build -t busybox https://raw.githubusercontent.com/jhowardmsft/busybox64/v1.0/Dockerfile | Out-Host)
|
||||
} else {
|
||||
$(& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" build -t busybox https://raw.githubusercontent.com/jhowardmsft/busybox/v1.0/Dockerfile | Out-Host)
|
||||
}
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: Failed to build busybox image"
|
||||
}
|
||||
#}
|
||||
|
||||
|
||||
Write-Host -ForegroundColor Green "INFO: Docker images of the daemon under test"
|
||||
Write-Host
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" images
|
||||
$ErrorActionPreference = "Stop"
|
||||
if ($LastExitCode -ne 0) {
|
||||
Throw "ERROR: The daemon under test does not appear to be running."
|
||||
Throw "ERROR: The daemon under test does not appear to be running."
|
||||
}
|
||||
Write-Host
|
||||
}
|
||||
}
|
||||
|
||||
# Run the WCOW integration tests unless SKIP_INTEGRATION_TESTS is defined
|
||||
if (($env:LCOW_MODE -eq $Null) -and ($env:LCOW_BASIC_MODE -eq $Null)) {
|
||||
if ($env:SKIP_INTEGRATION_TESTS -eq $null) {
|
||||
Write-Host -ForegroundColor Cyan "INFO: Running integration tests at $(Get-Date)..."
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
|
||||
# Location of the daemon under test.
|
||||
$env:OrigDOCKER_HOST="$env:DOCKER_HOST"
|
||||
|
||||
#https://blogs.technet.microsoft.com/heyscriptingguy/2011/09/20/solve-problems-with-external-command-lines-in-powershell/ is useful to see tokenising
|
||||
$c = "go test "
|
||||
$c += "`"-check.v`" "
|
||||
if ($env:INTEGRATION_TEST_NAME -ne $null) { # Makes it quicker for debugging to be able to run only a subset of the integration tests
|
||||
$c += "`"-check.f`" "
|
||||
$c += "`"$env:INTEGRATION_TEST_NAME`" "
|
||||
Write-Host -ForegroundColor Magenta "WARN: Only running integration tests matching $env:INTEGRATION_TEST_NAME"
|
||||
}
|
||||
$c += "`"-tags`" " + "`"autogen`" "
|
||||
$c += "`"-check.timeout`" " + "`"10m`" "
|
||||
$c += "`"-test.timeout`" " + "`"200m`" "
|
||||
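# Sketch of the assembled command (hypothetical test name), before PowerShell tokenisation:
#   go test "-check.v" "-check.f" "TestRunEcho" "-tags" "autogen" "-check.timeout" "10m" "-test.timeout" "200m"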
|
||||
if ($env:INTEGRATION_IN_CONTAINER -ne $null) {
|
||||
Write-Host -ForegroundColor Green "INFO: Integration tests being run inside a container"
|
||||
# Note we talk back through the container's gateway address
|
||||
# And the ridiculous lengths we have to go to in order to get the default gateway address... (Get-NetIPConfiguration doesn't work in nanoserver)
|
||||
# I just could not get the escaping to work in a single command, so output $c to a file and run that in the container instead...
|
||||
# Not the prettiest, but it works.
|
||||
$c | Out-File -Force "$env:TEMP\binary\runIntegrationCLI.ps1"
|
||||
$Duration= $(Measure-Command { & docker run `
|
||||
--rm `
|
||||
-e c=$c `
|
||||
--workdir "c`:\go\src\github.com\docker\docker\integration-cli" `
|
||||
-v "$env:TEMP\binary`:c:\target" `
|
||||
docker `
|
||||
"`$env`:PATH`='c`:\target;'+`$env:PATH`; `$env:DOCKER_HOST`='tcp`://'+(ipconfig | select -last 1).Substring(39)+'`:2357'; c:\target\runIntegrationCLI.ps1" | Out-Host } )
|
||||
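# (The ipconfig parsing above simply takes the last line of ipconfig's output and uses
# Substring(39) to skip past the "Default Gateway . . . :" label, a fragile but workable
# hack as noted above, since Get-NetIPConfiguration is unavailable in nanoserver.)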
} else {
|
||||
Write-Host -ForegroundColor Green "INFO: Integration tests being run from the host:"
|
||||
cd "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker\integration-cli"
|
||||
$env:DOCKER_HOST=$DASHH_CUT
|
||||
$env:PATH="$env:TEMP\binary;$env:PATH;" # Force to use the test binaries, not the host ones.
|
||||
Write-Host -ForegroundColor Green "INFO: $c"
|
||||
Write-Host -ForegroundColor Green "INFO: DOCKER_HOST at $DASHH_CUT"
|
||||
# Explicitly not using Measure-Command here, otherwise we don't get the output as it goes
|
||||
$start=(Get-Date); Invoke-Expression $c; $Duration=New-Timespan -Start $start -End (Get-Date)
|
||||
}
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: Integration tests failed at $(Get-Date). Duration`:$Duration"
|
||||
}
|
||||
Write-Host -ForegroundColor Green "INFO: Integration tests ended at $(Get-Date). Duration`:$Duration"
|
||||
} else {
|
||||
Write-Host -ForegroundColor Magenta "WARN: Skipping integration tests"
|
||||
}
|
||||
} else {
|
||||
# The LCOW version of the tests here
|
||||
if ($env:SKIP_INTEGRATION_TESTS -eq $null) {
|
||||
Write-Host -ForegroundColor Cyan "INFO: Running LCOW tests at $(Get-Date)..."
|
||||
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
|
||||
# Location of the daemon under test.
|
||||
$env:OrigDOCKER_HOST="$env:DOCKER_HOST"
|
||||
|
||||
# Make sure we are pointing at the DUT
|
||||
$env:DOCKER_HOST=$DASHH_CUT
|
||||
Write-Host -ForegroundColor Green "INFO: DOCKER_HOST at $DASHH_CUT"
|
||||
|
||||
# Force to use the test binaries, not the host ones.
|
||||
$env:PATH="$env:TEMP\binary;$env:PATH;"
|
||||
|
||||
if ($env:LCOW_BASIC_MODE -ne $null) {
|
||||
$wc = New-Object net.webclient
|
||||
try {
|
||||
Write-Host -ForegroundColor green "INFO: Downloading latest execution script..."
|
||||
$wc.Downloadfile("https://raw.githubusercontent.com/jhowardmsft/docker-w2wCIScripts/master/runCI/lcowbasicvalidation.ps1", "$env:TEMP\binary\lcowbasicvalidation.ps1")
|
||||
}
|
||||
catch [System.Net.WebException]
|
||||
{
|
||||
Throw ("Failed to download: $_")
|
||||
}
|
||||
|
||||
# Explicitly not using Measure-Command here, otherwise we don't get the output as it goes
|
||||
$ErrorActionPreference = "Stop"
|
||||
$start=(Get-Date); Invoke-Expression "powershell $env:TEMP\binary\lcowbasicvalidation.ps1"; $lec=$lastExitCode; $Duration=New-Timespan -Start $start -End (Get-Date)
|
||||
$Duration=New-Timespan -Start $start -End (Get-Date)
|
||||
Write-Host -ForegroundColor Green "INFO: LCOW tests ended at $(Get-Date). Duration`:$Duration"
|
||||
if ($lec -ne 0) {
|
||||
Throw "LCOW validation tests failed"
|
||||
}
|
||||
} else {
|
||||
#https://blogs.technet.microsoft.com/heyscriptingguy/2011/09/20/solve-problems-with-external-command-lines-in-powershell/ is useful to see tokenising
|
||||
$c = "go test "
|
||||
$c += "`"-check.v`" "
|
||||
if ($env:INTEGRATION_TEST_NAME -ne $null) { # Makes it quicker for debugging to be able to run only a subset of the integration tests
|
||||
$c += "`"-check.f`" "
|
||||
$c += "`"$env:INTEGRATION_TEST_NAME`" "
|
||||
Write-Host -ForegroundColor Magenta "WARN: Only running LCOW integration tests matching $env:INTEGRATION_TEST_NAME"
|
||||
}
|
||||
$c += "`"-tags`" " + "`"autogen`" "
|
||||
$c += "`"-check.timeout`" " + "`"10m`" "
|
||||
$c += "`"-test.timeout`" " + "`"200m`" "
|
||||
|
||||
Write-Host -ForegroundColor Green "INFO: LCOW Integration tests being run from the host:"
|
||||
cd "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker\integration-cli"
|
||||
Write-Host -ForegroundColor Green "INFO: $c"
|
||||
Write-Host -ForegroundColor Green "INFO: DOCKER_HOST at $DASHH_CUT"
|
||||
# Explicitly not using Measure-Command here, otherwise we don't get the output as it goes
|
||||
$start=(Get-Date); Invoke-Expression $c; $Duration=New-Timespan -Start $start -End (Get-Date)
|
||||
|
||||
}
|
||||
$ErrorActionPreference = "Stop"
|
||||
if (-not($LastExitCode -eq 0)) {
|
||||
Throw "ERROR: Integration tests failed at $(Get-Date). Duration`:$Duration"
|
||||
}
|
||||
Write-Host -ForegroundColor Green "INFO: Integration tests ended at $(Get-Date). Duration`:$Duration"
|
||||
} else {
|
||||
Write-Host -ForegroundColor Magenta "WARN: Skipping LCOW tests"
|
||||
}
|
||||
}
|
||||
|
||||
# Docker info now to get counts (after or if jjh/containercounts is merged)
|
||||
if ($daemonStarted -eq 1) {
|
||||
Write-Host -ForegroundColor Green "INFO: Docker info of the daemon under test at end of run"
|
||||
Write-Host
|
||||
$ErrorActionPreference = "SilentlyContinue"
|
||||
& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" info
|
||||
$ErrorActionPreference = "Stop"
|
||||
if ($LastExitCode -ne 0) {
|
||||
Throw "ERROR: The daemon under test does not appear to be running."
|
||||
Throw "ERROR: The daemon under test does not appear to be running."
|
||||
}
|
||||
Write-Host
|
||||
}
|
||||
|
||||
# Stop the daemon under test
|
||||
if (Test-Path "$env:TEMP\docker.pid") {
|
||||
$p=Get-Content "$env:TEMP\docker.pid" -raw
|
||||
if (($p -ne $null) -and ($daemonStarted -eq 1)) {
|
||||
Write-Host -ForegroundColor green "INFO: Stopping daemon under test"
|
||||
taskkill -f -t -pid $p
|
||||
#sleep 5
|
||||
}
|
||||
Remove-Item "$env:TEMP\docker.pid" -force -ErrorAction SilentlyContinue
|
||||
}
|
||||
|
||||
Write-Host -ForegroundColor Green "INFO: executeCI.ps1 Completed successfully at $(Get-Date)."
|
||||
}
|
||||
Catch [Exception] {
|
||||
$FinallyColour="Red"
|
||||
Write-Host -ForegroundColor Red ("`r`n`r`nERROR: Failed '$_' at $(Get-Date)")
|
||||
Write-Host -ForegroundColor Red ($_.InvocationInfo.PositionMessage)
|
||||
Write-Host "`n`n"
|
||||
|
||||
# Exit to ensure Jenkins captures it. Don't exit in the ISE or interactive PowerShell; there, fall through to the Throw below instead.
|
||||
if ( ([bool]([Environment]::GetCommandLineArgs() -Like '*-NonInteractive*')) -and `
|
||||
([bool]([Environment]::GetCommandLineArgs() -NotLike "*Powershell_ISE.exe*"))) {
|
||||
exit 1
|
||||
}
|
||||
Throw $_
|
||||
}
|
||||
Finally {
|
||||
$ErrorActionPreference="SilentlyContinue"
|
||||
$global:ProgressPreference=$origProgressPreference
|
||||
Write-Host -ForegroundColor Green "INFO: Tidying up at end of run"
|
||||
|
||||
# Restore the path
|
||||
if ($origPath -ne $null) { $env:PATH=$origPath }
|
||||
|
||||
# Restore the DOCKER_HOST
|
||||
if ($origDOCKER_HOST -ne $null) { $env:DOCKER_HOST=$origDOCKER_HOST }
|
||||
|
||||
# Restore the GOROOT and GOPATH variables
|
||||
if ($origGOROOT -ne $null) { $env:GOROOT=$origGOROOT }
|
||||
if ($origGOPATH -ne $null) { $env:GOPATH=$origGOPATH }
|
||||
|
||||
# Dump the daemon log if asked to
|
||||
if ($daemonStarted -eq 1) {
|
||||
if ($dumpDaemonLog -eq 1) {
|
||||
Write-Host -ForegroundColor Cyan "----------- DAEMON LOG ------------"
|
||||
Get-Content "$env:TEMP\dut.err" -ErrorAction SilentlyContinue | Write-Host -ForegroundColor Cyan
|
||||
Write-Host -ForegroundColor Cyan "----------- END DAEMON LOG --------"
|
||||
}
|
||||
}
|
||||
|
||||
# Save the daemon under test log
|
||||
if ($daemonStarted -eq 1) {
|
||||
Write-Host -ForegroundColor Green "INFO: Saving daemon under test log ($env:TEMP\dut.err) to $TEMPORIG\CIDUT.log"
|
||||
Copy-Item "$env:TEMP\dut.err" "$TEMPORIG\CIDUT.log" -Force -ErrorAction SilentlyContinue
|
||||
}
|
||||
|
||||
cd "$env:SOURCES_DRIVE\$env:SOURCES_SUBDIR" -ErrorAction SilentlyContinue
|
||||
Nuke-Everything
|
||||
$Dur=New-TimeSpan -Start $StartTime -End $(Get-Date)
|
||||
Write-Host -ForegroundColor $FinallyColour "`nINFO: executeCI.ps1 exiting at $(date). Duration $dur`n"
|
||||
}
|
||||
@@ -4,7 +4,7 @@
|
||||
# containerd is also pinned in vendor.conf. When updating the binary
|
||||
# version you may also need to update the vendor version to pick up bug
|
||||
# fixes or new APIs.
|
||||
CONTAINERD_COMMIT=468a545b9edcd5932818eb9de8e72413e616e86e # v1.1.2
|
||||
CONTAINERD_COMMIT=9754871865f7fe2f4e74d43e2fc7ccd237edcbce # v1.2.2
|
||||
|
||||
install_containerd() {
|
||||
echo "Install containerd version $CONTAINERD_COMMIT"
|
||||
@@ -30,7 +30,7 @@ install_containerd() {
|
||||
|
||||
mkdir -p ${PREFIX}
|
||||
|
||||
cp bin/containerd ${PREFIX}/docker-containerd
|
||||
cp bin/containerd-shim ${PREFIX}/docker-containerd-shim
|
||||
cp bin/ctr ${PREFIX}/docker-containerd-ctr
|
||||
cp bin/containerd ${PREFIX}/containerd
|
||||
cp bin/containerd-shim ${PREFIX}/containerd-shim
|
||||
cp bin/ctr ${PREFIX}/ctr
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
# LIBNETWORK_COMMIT is used to build the docker-userland-proxy binary. When
|
||||
# updating the binary version, consider updating github.com/docker/libnetwork
|
||||
# in vendor.conf accordingly
|
||||
LIBNETWORK_COMMIT=f30a35b091cc2a431ef9856c75c343f75bb5f2e2
|
||||
LIBNETWORK_COMMIT=2cfbf9b1f98162a55829a21cc603c76072a75382 # bump_18.09 branch
|
||||
|
||||
install_proxy() {
|
||||
case "$1" in
|
||||
|
||||
@@ -1,22 +1,45 @@
|
||||
#!/bin/sh
|
||||
|
||||
# When updating RUNC_COMMIT, also update runc in vendor.conf accordingly
|
||||
RUNC_COMMIT=69663f0bd4b60df09991c08812a60108003fa340
|
||||
# The version of runc should match the version that is used by the containerd
|
||||
# version that is used. If you need to update runc, open a pull request in
|
||||
# the containerd project first, and update both after that is merged.
|
||||
RUNC_COMMIT=96ec2177ae841256168fcf76954f7177af9446eb
|
||||
RUNC_OVERRIDE_COMMIT=09c8266bf2fcf9519a651b04ae54c967b9ab86ec
|
||||
RUNC_BUNDLE=/go/src/github.com/docker/docker/git-bundles/CVE-2019-5736.bundle
|
||||
|
||||
install_runc() {
|
||||
# Do not build with ambient capabilities support
|
||||
RUNC_BUILDTAGS="${RUNC_BUILDTAGS:-"seccomp apparmor selinux"}"
|
||||
# If using RHEL7 kernels (3.10.0 el7), disable kmem accounting/limiting
|
||||
if uname -r | grep -q '^3\.10\.0.*\.el7\.'; then
|
||||
: ${RUNC_NOKMEM='nokmem'}
|
||||
fi
|
||||
|
||||
echo "Install runc version $RUNC_COMMIT"
|
||||
# Do not build with ambient capabilities support
|
||||
RUNC_BUILDTAGS="${RUNC_BUILDTAGS:-"seccomp apparmor selinux $RUNC_NOKMEM"}"
|
||||
|
||||
echo "Install runc version $RUNC_COMMIT (build tags: $RUNC_BUILDTAGS)"
|
||||
git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc"
|
||||
cd "$GOPATH/src/github.com/opencontainers/runc"
|
||||
git checkout -q "$RUNC_COMMIT"
|
||||
|
||||
if [ -f "$RUNC_BUNDLE" ];then
|
||||
git bundle unbundle "$RUNC_BUNDLE"
|
||||
git checkout -q "$RUNC_OVERRIDE_COMMIT"
|
||||
if [ "$(git rev-parse HEAD)" != "$RUNC_OVERRIDE_COMMIT" ]; then
|
||||
echo "ERROR: Commit with bundle does not match override commit"
|
||||
echo " $(git rev-parse HEAD) != '$RUNC_OVERRIDE_COMMIT'"
|
||||
exit 1
|
||||
fi
|
||||
RUNC_COMMIT=$RUNC_OVERRIDE_COMMIT
|
||||
fi
|
||||
|
||||
if [ -z "$1" ]; then
|
||||
target=static
|
||||
else
|
||||
target="$1"
|
||||
fi
|
||||
make BUILDTAGS="$RUNC_BUILDTAGS" "$target"
|
||||
OVERRIDE_VERSION="1.0.0-rc6+dev.docker-18.09"
|
||||
make BUILDTAGS="$RUNC_BUILDTAGS" COMMIT="$RUNC_COMMIT" VERSION="$OVERRIDE_VERSION" "$target"
|
||||
mkdir -p ${PREFIX}
|
||||
cp runc ${PREFIX}/docker-runc
|
||||
cp runc ${PREFIX}/runc
|
||||
}
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
DOCKER_DAEMON_BINARY_NAME='dockerd'
|
||||
DOCKER_RUNC_BINARY_NAME='docker-runc'
|
||||
DOCKER_CONTAINERD_BINARY_NAME='docker-containerd'
|
||||
DOCKER_CONTAINERD_CTR_BINARY_NAME='docker-containerd-ctr'
|
||||
DOCKER_CONTAINERD_SHIM_BINARY_NAME='docker-containerd-shim'
|
||||
DOCKER_RUNC_BINARY_NAME='runc'
|
||||
DOCKER_CONTAINERD_BINARY_NAME='containerd'
|
||||
DOCKER_CONTAINERD_CTR_BINARY_NAME='ctr'
|
||||
DOCKER_CONTAINERD_SHIM_BINARY_NAME='containerd-shim'
|
||||
DOCKER_PROXY_BINARY_NAME='docker-proxy'
|
||||
DOCKER_INIT_BINARY_NAME='docker-init'
|
||||
|
||||
@@ -19,7 +19,6 @@ const (
|
||||
Version string = "$VERSION"
|
||||
BuildTime string = "$BUILDTIME"
|
||||
IAmStatic string = "${IAMSTATIC:-true}"
|
||||
ContainerdCommitID string = "${CONTAINERD_COMMIT}"
|
||||
PlatformName string = "${PLATFORM}"
|
||||
ProductName string = "${PRODUCT}"
|
||||
DefaultProductLicense string = "${DEFAULT_PRODUCT_LICENSE}"
|
||||
@@ -37,7 +36,6 @@ package dockerversion
|
||||
// Default build-time variable for library-import.
|
||||
// This file is overridden on build with build-time informations.
|
||||
const (
|
||||
RuncCommitID string = "${RUNC_COMMIT}"
|
||||
InitCommitID string = "${TINI_COMMIT}"
|
||||
)
|
||||
|
||||
|
||||
@@ -112,7 +112,7 @@ error_on_leaked_containerd_shims() {
|
||||
fi
|
||||
|
||||
leftovers=$(ps -ax -o pid,cmd |
|
||||
awk '$2 == "docker-containerd-shim" && $4 ~ /.*\/bundles\/.*\/test-integration/ { print $1 }')
|
||||
awk '$2 == "containerd-shim" && $4 ~ /.*\/bundles\/.*\/test-integration/ { print $1 }')
|
||||
if [ -n "$leftovers" ]; then
|
||||
ps aux
|
||||
kill -9 $leftovers 2> /dev/null
|
||||
|
||||
@@ -10,14 +10,14 @@ copy_binaries() {
|
||||
if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then
|
||||
return
|
||||
fi
|
||||
if [ ! -x /usr/local/bin/docker-runc ]; then
|
||||
if [ ! -x /usr/local/bin/runc ]; then
|
||||
return
|
||||
fi
|
||||
echo "Copying nested executables into $dir"
|
||||
for file in containerd containerd-shim containerd-ctr runc init proxy; do
|
||||
cp -f `which "docker-$file"` "$dir/"
|
||||
for file in containerd containerd-shim ctr runc docker-init docker-proxy; do
|
||||
cp -f `which "$file"` "$dir/"
|
||||
if [ "$hash" == "hash" ]; then
|
||||
hash_files "$dir/docker-$file"
|
||||
hash_files "$dir/$file"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ const (
|
||||
privateRegistryURL = registry.DefaultURL
|
||||
|
||||
// path to containerd's ctr binary
|
||||
ctrBinary = "docker-containerd-ctr"
|
||||
ctrBinary = "ctr"
|
||||
|
||||
// the docker daemon binary to use
|
||||
dockerdBinary = "dockerd"
|
||||
|
||||
@@ -1759,7 +1759,7 @@ func (s *DockerSuite) TestContainersAPICreateMountsValidation(c *check.C) {
|
||||
Target: destPath}}},
|
||||
msg: "source path does not exist",
|
||||
// FIXME(vdemeester) fails into e2e, migrate to integration/container anyway
|
||||
// msg: "bind mount source path does not exist: " + notExistPath,
|
||||
// msg: "source path does not exist: " + notExistPath,
|
||||
},
|
||||
{
|
||||
config: containertypes.Config{
|
||||
|
||||
@@ -44,6 +44,8 @@ import (
|
||||
"gotest.tools/icmd"
|
||||
)
|
||||
|
||||
const containerdSocket = "/var/run/docker/containerd/containerd.sock"
|
||||
|
||||
// TestLegacyDaemonCommand test starting docker daemon using "deprecated" docker daemon
|
||||
// command. Remove this test when we remove this.
|
||||
func (s *DockerDaemonSuite) TestLegacyDaemonCommand(c *check.C) {
|
||||
@@ -1449,7 +1451,7 @@ func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *chec
|
||||
c.Assert(d.Kill(), check.IsNil)
|
||||
|
||||
// kill the container
|
||||
icmd.RunCommand(ctrBinary, "--address", "/var/run/docker/containerd/docker-containerd.sock",
|
||||
icmd.RunCommand(ctrBinary, "--address", containerdSocket,
|
||||
"--namespace", moby_daemon.ContainersNamespace, "tasks", "kill", id).Assert(c, icmd.Success)
|
||||
|
||||
// restart daemon.
|
||||
@@ -1971,7 +1973,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithKilledRunningContainer(t *check
|
||||
}
|
||||
|
||||
// kill the container
|
||||
icmd.RunCommand(ctrBinary, "--address", "/var/run/docker/containerd/docker-containerd.sock",
|
||||
icmd.RunCommand(ctrBinary, "--address", containerdSocket,
|
||||
"--namespace", moby_daemon.ContainersNamespace, "tasks", "kill", cid).Assert(t, icmd.Success)
|
||||
|
||||
// Give time to containerd to process the command if we don't
|
||||
@@ -2074,7 +2076,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithUnpausedRunningContainer(t *che
|
||||
// resume the container
|
||||
result := icmd.RunCommand(
|
||||
ctrBinary,
|
||||
"--address", "/var/run/docker/containerd/docker-containerd.sock",
|
||||
"--address", containerdSocket,
|
||||
"--namespace", moby_daemon.ContainersNamespace,
|
||||
"tasks", "resume", cid)
|
||||
result.Assert(t, icmd.Success)
|
||||
@@ -2409,7 +2411,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) {
|
||||
{
|
||||
"runtimes": {
|
||||
"oci": {
|
||||
"path": "docker-runc"
|
||||
"path": "runc"
|
||||
},
|
||||
"vm": {
|
||||
"path": "/usr/local/bin/vm-manager",
|
||||
@@ -2491,7 +2493,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) {
|
||||
"default-runtime": "vm",
|
||||
"runtimes": {
|
||||
"oci": {
|
||||
"path": "docker-runc"
|
||||
"path": "runc"
|
||||
},
|
||||
"vm": {
|
||||
"path": "/usr/local/bin/vm-manager",
|
||||
@@ -2517,7 +2519,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *check.C) {
|
||||
s.d.StartWithBusybox(c, "--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager")
|
||||
s.d.StartWithBusybox(c, "--add-runtime", "oci=runc", "--add-runtime", "vm=/usr/local/bin/vm-manager")
|
||||
|
||||
// Run with default runtime
|
||||
out, err := s.d.Cmd("run", "--rm", "busybox", "ls")
|
||||
@@ -2564,7 +2566,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *check.C) {
|
||||
|
||||
// Check that we can select a default runtime
|
||||
s.d.Stop(c)
|
||||
s.d.StartWithBusybox(c, "--default-runtime=vm", "--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager")
|
||||
s.d.StartWithBusybox(c, "--default-runtime=vm", "--add-runtime", "oci=runc", "--add-runtime", "vm=/usr/local/bin/vm-manager")
|
||||
|
||||
out, err = s.d.Cmd("run", "--rm", "busybox", "ls")
|
||||
c.Assert(err, check.NotNil, check.Commentf("%s", out))
|
||||
|
||||
@@ -7,7 +7,9 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
dclient "github.com/docker/docker/client"
|
||||
"github.com/docker/docker/internal/test/daemon"
|
||||
"github.com/docker/docker/internal/test/fakecontext"
|
||||
"github.com/docker/docker/internal/test/request"
|
||||
"github.com/moby/buildkit/session"
|
||||
@@ -19,7 +21,11 @@ import (
|
||||
)
|
||||
|
||||
func TestBuildWithSession(t *testing.T) {
|
||||
skip.If(t, !testEnv.DaemonInfo.ExperimentalBuild)
|
||||
skip.If(t, testEnv.IsRemoteDaemon, "cannot run daemon when remote daemon")
|
||||
skip.If(t, testEnv.DaemonInfo.OSType == "windows")
|
||||
d := daemon.New(t, daemon.WithExperimental)
|
||||
d.StartWithBusybox(t)
|
||||
defer d.Stop(t)
|
||||
|
||||
client := testEnv.APIClient()
|
||||
|
||||
@@ -76,7 +82,7 @@ func TestBuildWithSession(t *testing.T) {
|
||||
assert.Check(t, is.Contains(string(outBytes), "Successfully built"))
|
||||
assert.Check(t, is.Equal(strings.Count(string(outBytes), "Using cache"), 4))
|
||||
|
||||
_, err = client.BuildCachePrune(context.TODO())
|
||||
_, err = client.BuildCachePrune(context.TODO(), types.BuildCachePruneOptions{All: true})
|
||||
assert.Check(t, err)
|
||||
|
||||
du, err = client.DiskUsage(context.TODO())
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/integration/internal/container"
|
||||
"github.com/docker/docker/internal/test/daemon"
|
||||
"github.com/docker/docker/internal/test/fakecontext"
|
||||
"github.com/docker/docker/pkg/stdcopy"
|
||||
"gotest.tools/assert"
|
||||
@@ -18,7 +19,12 @@ import (
|
||||
)
|
||||
|
||||
func TestBuildSquashParent(t *testing.T) {
|
||||
skip.If(t, testEnv.DaemonInfo.OSType == "windows")
|
||||
skip.If(t, !testEnv.DaemonInfo.ExperimentalBuild)
|
||||
skip.If(t, testEnv.IsRemoteDaemon, "cannot run daemon when remote daemon")
|
||||
d := daemon.New(t, daemon.WithExperimental)
|
||||
d.StartWithBusybox(t)
|
||||
defer d.Stop(t)
|
||||
|
||||
client := testEnv.APIClient()
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
@@ -22,6 +23,7 @@ import (
|
||||
)
|
||||
|
||||
func TestBuildWithRemoveAndForceRemove(t *testing.T) {
|
||||
skip.If(t, testEnv.DaemonInfo.OSType == "windows", "FIXME")
|
||||
defer setupTest(t)()
|
||||
t.Parallel()
|
||||
cases := []struct {
|
||||
@@ -137,6 +139,7 @@ func buildContainerIdsFilter(buildOutput io.Reader) (filters.Args, error) {
|
||||
|
||||
func TestBuildMultiStageParentConfig(t *testing.T) {
|
||||
skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.35"), "broken in earlier versions")
|
||||
skip.If(t, testEnv.DaemonInfo.OSType == "windows", "FIXME")
|
||||
dockerfile := `
|
||||
FROM busybox AS stage0
|
||||
ENV WHO=parent
|
||||
@@ -166,16 +169,27 @@ func TestBuildMultiStageParentConfig(t *testing.T) {
|
||||
resp.Body.Close()
|
||||
assert.NilError(t, err)
|
||||
|
||||
time.Sleep(30 * time.Second)
|
||||
|
||||
imgs, err := apiclient.ImageList(ctx, types.ImageListOptions{})
|
||||
assert.NilError(t, err)
|
||||
t.Log(imgs)
|
||||
|
||||
image, _, err := apiclient.ImageInspectWithRaw(ctx, "build1")
|
||||
assert.NilError(t, err)
|
||||
|
||||
assert.Check(t, is.Equal("/foo/sub2", image.Config.WorkingDir))
|
||||
expected := "/foo/sub2"
|
||||
if testEnv.DaemonInfo.OSType == "windows" {
|
||||
expected = `C:\foo\sub2`
|
||||
}
|
||||
assert.Check(t, is.Equal(expected, image.Config.WorkingDir))
|
||||
assert.Check(t, is.Contains(image.Config.Env, "WHO=parent"))
|
||||
}
|
||||
|
||||
// Test cases in #36996
|
||||
func TestBuildLabelWithTargets(t *testing.T) {
|
||||
skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.38"), "test added after 1.38")
|
||||
skip.If(t, testEnv.DaemonInfo.OSType == "windows", "FIXME")
|
||||
bldName := "build-a"
|
||||
testLabels := map[string]string{
|
||||
"foo": "bar",
|
||||
@@ -282,6 +296,7 @@ func TestBuildWithEmptyLayers(t *testing.T) {
|
||||
// #35652
|
||||
func TestBuildMultiStageOnBuild(t *testing.T) {
|
||||
skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.33"), "broken in earlier versions")
|
||||
skip.If(t, testEnv.DaemonInfo.OSType == "windows", "FIXME")
|
||||
defer setupTest(t)()
|
||||
// test both metadata and layer based commands as they may be implemented differently
|
||||
dockerfile := `FROM busybox AS stage1
|
||||
@@ -289,7 +304,8 @@ ONBUILD RUN echo 'foo' >somefile
|
||||
ONBUILD ENV bar=baz
|
||||
|
||||
FROM stage1
|
||||
RUN cat somefile # fails if ONBUILD RUN fails
|
||||
# fails if ONBUILD RUN fails
|
||||
RUN cat somefile
|
||||
|
||||
FROM stage1
|
||||
RUN cat somefile`
|
||||
@@ -327,6 +343,8 @@ RUN cat somefile`
|
||||
// #35403 #36122
|
||||
func TestBuildUncleanTarFilenames(t *testing.T) {
|
||||
skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.37"), "broken in earlier versions")
|
||||
skip.If(t, testEnv.DaemonInfo.OSType == "windows", "FIXME")
|
||||
|
||||
ctx := context.TODO()
|
||||
defer setupTest(t)()
|
||||
|
||||
@@ -385,6 +403,7 @@ COPY bar /`
|
||||
// docker/for-linux#135
|
||||
// #35641
|
||||
func TestBuildMultiStageLayerLeak(t *testing.T) {
|
||||
skip.If(t, testEnv.DaemonInfo.OSType == "windows", "FIXME")
|
||||
skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.37"), "broken in earlier versions")
|
||||
ctx := context.TODO()
|
||||
defer setupTest(t)()
|
||||
@@ -423,6 +442,39 @@ RUN [ ! -f foo ]
|
||||
assert.Check(t, is.Contains(out.String(), "Successfully built"))
|
||||
}
|
||||
|
||||
// #37581
|
||||
func TestBuildWithHugeFile(t *testing.T) {
|
||||
skip.If(t, testEnv.OSType == "windows")
|
||||
ctx := context.TODO()
|
||||
defer setupTest(t)()
|
||||
|
||||
dockerfile := `FROM busybox
|
||||
# create a sparse file with size over 8GB
|
||||
RUN for g in $(seq 0 8); do dd if=/dev/urandom of=rnd bs=1K count=1 seek=$((1024*1024*g)) status=none; done && \
|
||||
ls -la rnd && du -sk rnd`
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
w := tar.NewWriter(buf)
|
||||
writeTarRecord(t, w, "Dockerfile", dockerfile)
|
||||
err := w.Close()
|
||||
assert.NilError(t, err)
|
||||
|
||||
apiclient := testEnv.APIClient()
|
||||
resp, err := apiclient.ImageBuild(ctx,
|
||||
buf,
|
||||
types.ImageBuildOptions{
|
||||
Remove: true,
|
||||
ForceRemove: true,
|
||||
})
|
||||
|
||||
out := bytes.NewBuffer(nil)
|
||||
assert.NilError(t, err)
|
||||
_, err = io.Copy(out, resp.Body)
|
||||
resp.Body.Close()
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.Contains(out.String(), "Successfully built"))
|
||||
}
|
||||
|
||||
func writeTarRecord(t *testing.T, w *tar.Writer, fn, contents string) {
|
||||
err := w.WriteHeader(&tar.Header{
|
||||
Name: fn,
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
)
|
||||
|
||||
func TestConfigList(t *testing.T) {
|
||||
skip.If(t, testEnv.DaemonInfo.OSType != "linux")
|
||||
skip.If(t, testEnv.DaemonInfo.OSType == "windows")
|
||||
|
||||
defer setupTest(t)()
|
||||
d := swarm.NewSwarm(t, testEnv)
|
||||
@@ -102,7 +102,7 @@ func createConfig(ctx context.Context, t *testing.T, client client.APIClient, na
|
||||
}
|
||||
|
||||
func TestConfigsCreateAndDelete(t *testing.T) {
|
||||
skip.If(t, testEnv.DaemonInfo.OSType != "linux")
|
||||
skip.If(t, testEnv.DaemonInfo.OSType == "windows")
|
||||
|
||||
defer setupTest(t)()
|
||||
d := swarm.NewSwarm(t, testEnv)
|
||||
@@ -130,7 +130,7 @@ func TestConfigsCreateAndDelete(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestConfigsUpdate(t *testing.T) {
|
||||
skip.If(t, testEnv.DaemonInfo.OSType != "linux")
|
||||
skip.If(t, testEnv.DaemonInfo.OSType == "windows")
|
||||
|
||||
defer setupTest(t)()
|
||||
d := swarm.NewSwarm(t, testEnv)
|
||||
@@ -184,6 +184,7 @@ func TestConfigsUpdate(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestTemplatedConfig(t *testing.T) {
|
||||
skip.If(t, testEnv.DaemonInfo.OSType == "windows")
|
||||
d := swarm.NewSwarm(t, testEnv)
|
||||
defer d.Stop(t)
|
||||
client := d.NewClientT(t)
|
||||
@@ -323,7 +324,7 @@ func waitAndAssert(t *testing.T, timeout time.Duration, f func(*testing.T) bool)
|
||||
}
|
||||
|
||||
func TestConfigInspect(t *testing.T) {
|
||||
skip.If(t, testEnv.DaemonInfo.OSType != "linux")
|
||||
skip.If(t, testEnv.DaemonInfo.OSType == "windows")
|
||||
|
||||
defer setupTest(t)()
|
||||
d := swarm.NewSwarm(t, testEnv)
|
||||
|
||||
integration/container/container_test.go (new file, 42 lines)
@@ -0,0 +1,42 @@
|
||||
package container // import "github.com/docker/docker/integration/container"
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/internal/test/request"
|
||||
"gotest.tools/assert"
|
||||
is "gotest.tools/assert/cmp"
|
||||
)
|
||||
|
||||
func TestContainerInvalidJSON(t *testing.T) {
|
||||
defer setupTest(t)()
|
||||
|
||||
endpoints := []string{
|
||||
"/containers/foobar/copy",
|
||||
"/containers/foobar/exec",
|
||||
"/exec/foobar/start",
|
||||
}
|
||||
|
||||
for _, ep := range endpoints {
|
||||
t.Run(ep, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
res, body, err := request.Post(ep, request.RawString("{invalid json"), request.JSON)
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, res.StatusCode, http.StatusBadRequest)
|
||||
|
||||
buf, err := request.ReadBody(body)
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.Contains(string(buf), "invalid character 'i' looking for beginning of object key string"))
|
||||
|
||||
res, body, err = request.Post(ep, request.JSON)
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, res.StatusCode, http.StatusBadRequest)
|
||||
|
||||
buf, err = request.ReadBody(body)
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.Contains(string(buf), "got EOF while reading request body"))
|
||||
})
|
||||
}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff.