Mirror of https://github.com/moby/moby.git (synced 2026-01-12 03:01:38 +00:00)
Compare commits: master...v17.12.0-c (16 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 5acfe70329 | |
| | 3b26399210 | |
| | 5d76d4c74f | |
| | 65a847368e | |
| | 2c94aa8a93 | |
| | 032f6b8b44 | |
| | aece6e8c55 | |
| | de79f28886 | |
| | 8df578a0fe | |
| | 63061a7d55 | |
| | 46cfb10bdf | |
| | 31005179e3 | |
| | 31375e2216 | |
| | 5d3a4855f9 | |
| | 91cbf939ab | |
| | 4b1ee4fda2 | |
@@ -92,6 +92,8 @@ func installRegistryServiceFlags(options *registry.ServiceOptions, flags *pflag.
|
||||
flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication")
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
// TODO: Remove this flag after 3 release cycles (18.03)
|
||||
flags.BoolVar(&options.V2Only, "disable-legacy-registry", true, "Disable contacting legacy registries")
|
||||
flags.MarkHidden("disable-legacy-registry")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -472,8 +473,15 @@ func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !conf.V2Only {
|
||||
logrus.Warnf(`The "disable-legacy-registry" option is deprecated and wil be removed in Docker v17.12. Interacting with legacy (v1) registries will no longer be supported in Docker v17.12"`)
|
||||
if runtime.GOOS != "windows" {
|
||||
if flags.Changed("disable-legacy-registry") {
|
||||
// TODO: Remove this error after 3 release cycles (18.03)
|
||||
return nil, errors.New("ERROR: The '--disable-legacy-registry' flag has been removed. Interacting with legacy (v1) registries is no longer supported")
|
||||
}
|
||||
if !conf.V2Only {
|
||||
// TODO: Remove this error after 3 release cycles (18.03)
|
||||
return nil, errors.New("ERROR: The 'disable-legacy-registry' configuration option has been removed. Interacting with legacy (v1) registries is no longer supported")
|
||||
}
|
||||
}
|
||||
|
||||
if flags.Changed("graph") {
|
||||
|
||||
@@ -97,15 +97,3 @@ func TestLoadDaemonConfigWithTrueDefaultValuesLeaveDefaults(t *testing.T) {
|
||||
|
||||
assert.True(t, loadedConfig.EnableUserlandProxy)
|
||||
}
|
||||
|
||||
func TestLoadDaemonConfigWithLegacyRegistryOptions(t *testing.T) {
|
||||
content := `{"disable-legacy-registry": false}`
|
||||
tempFile := fs.NewFile(t, "config", fs.WithContent(content))
|
||||
defer tempFile.Remove()
|
||||
|
||||
opts := defaultOptions(tempFile.Path())
|
||||
loadedConfig, err := loadDaemonCliConfig(opts)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, loadedConfig)
|
||||
assert.False(t, loadedConfig.V2Only)
|
||||
}
|
||||
|
||||
@@ -62,8 +62,8 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// MainNamespace is the name of the namespace used for users containers
|
||||
const MainNamespace = "moby"
|
||||
// ContainersNamespace is the name of the namespace used for users containers
|
||||
const ContainersNamespace = "moby"
|
||||
|
||||
var (
|
||||
errSystemNotSupported = errors.New("the Docker daemon is not supported on this platform")
|
||||
@@ -247,6 +247,11 @@ func (daemon *Daemon) restore() error {
|
||||
logrus.WithError(err).Errorf("Failed to delete container %s from containerd", c.ID)
|
||||
return
|
||||
}
|
||||
} else if !daemon.configStore.LiveRestoreEnabled {
|
||||
if err := daemon.kill(c, c.StopSignal()); err != nil && !errdefs.IsNotFound(err) {
|
||||
logrus.WithError(err).WithField("container", c.ID).Error("error shutting down container")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if c.IsRunning() || c.IsPaused() {
|
||||
@@ -317,24 +322,24 @@ func (daemon *Daemon) restore() error {
|
||||
activeSandboxes[c.NetworkSettings.SandboxID] = options
|
||||
mapLock.Unlock()
|
||||
}
|
||||
} else {
|
||||
// get list of containers we need to restart
|
||||
}
|
||||
|
||||
// Do not autostart containers which
|
||||
// has endpoints in a swarm scope
|
||||
// network yet since the cluster is
|
||||
// not initialized yet. We will start
|
||||
// it after the cluster is
|
||||
// initialized.
|
||||
if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
|
||||
mapLock.Lock()
|
||||
restartContainers[c] = make(chan struct{})
|
||||
mapLock.Unlock()
|
||||
} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
|
||||
mapLock.Lock()
|
||||
removeContainers[c.ID] = c
|
||||
mapLock.Unlock()
|
||||
}
|
||||
// get list of containers we need to restart
|
||||
|
||||
// Do not autostart containers which
|
||||
// has endpoints in a swarm scope
|
||||
// network yet since the cluster is
|
||||
// not initialized yet. We will start
|
||||
// it after the cluster is
|
||||
// initialized.
|
||||
if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
|
||||
mapLock.Lock()
|
||||
restartContainers[c] = make(chan struct{})
|
||||
mapLock.Unlock()
|
||||
} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
|
||||
mapLock.Lock()
|
||||
removeContainers[c.ID] = c
|
||||
mapLock.Unlock()
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
@@ -890,7 +895,7 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
|
||||
|
||||
go d.execCommandGC()
|
||||
|
||||
d.containerd, err = containerdRemote.NewClient(MainNamespace, d)
|
||||
d.containerd, err = containerdRemote.NewClient(ContainersNamespace, d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
)
|
||||
|
||||
// errImageDoesNotExist is error returned when no image can be found for a reference.
|
||||
@@ -59,21 +58,6 @@ func (daemon *Daemon) GetImageIDAndOS(refOrID string) (image.ID, string, error)
|
||||
return id, imageOS, nil
|
||||
}
|
||||
|
||||
// deprecated: repo:shortid https://github.com/docker/docker/pull/799
|
||||
if tagged, ok := namedRef.(reference.Tagged); ok {
|
||||
if tag := tagged.Tag(); stringid.IsShortID(stringid.TruncateID(tag)) {
|
||||
for platform := range daemon.stores {
|
||||
if id, err := daemon.stores[platform].imageStore.Search(tag); err == nil {
|
||||
for _, storeRef := range daemon.referenceStore.References(id.Digest()) {
|
||||
if storeRef.Name() == namedRef.Name() {
|
||||
return id, platform, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Search based on ID
|
||||
for os := range daemon.stores {
|
||||
if id, err := daemon.stores[os].imageStore.Search(refOrID); err == nil {
|
||||
|
||||
@@ -4,10 +4,10 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/errdefs"
|
||||
containerpkg "github.com/docker/docker/container"
|
||||
"github.com/docker/docker/libcontainerd"
|
||||
"github.com/docker/docker/pkg/signal"
|
||||
@@ -97,15 +97,11 @@ func (daemon *Daemon) killWithSignal(container *containerpkg.Container, sig int)
|
||||
}
|
||||
|
||||
if err := daemon.kill(container, sig); err != nil {
|
||||
err = errors.Wrapf(err, "Cannot kill container %s", container.ID)
|
||||
// if container or process not exists, ignore the error
|
||||
// TODO: we shouldn't have to parse error strings from containerd
|
||||
if strings.Contains(err.Error(), "container not found") ||
|
||||
strings.Contains(err.Error(), "no such process") {
|
||||
logrus.Warnf("container kill failed because of 'container not found' or 'no such process': %s", err.Error())
|
||||
if errdefs.IsNotFound(err) {
|
||||
unpause = false
|
||||
logrus.WithError(err).WithField("container", container.ID).WithField("action", "kill").Debug("container kill failed because of 'container not found' or 'no such process'")
|
||||
} else {
|
||||
return err
|
||||
return errors.Wrapf(err, "Cannot kill container %s", container.ID)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -171,7 +167,7 @@ func (daemon *Daemon) Kill(container *containerpkg.Container) error {
|
||||
// killPossibleDeadProcess is a wrapper around killSig() suppressing "no such process" error.
|
||||
func (daemon *Daemon) killPossiblyDeadProcess(container *containerpkg.Container, sig int) error {
|
||||
err := daemon.killWithSignal(container, sig)
|
||||
if err == syscall.ESRCH {
|
||||
if errdefs.IsNotFound(err) {
|
||||
e := errNoSuchProcess{container.GetPID(), sig}
|
||||
logrus.Debug(e)
|
||||
return e
|
||||
|
||||
@@ -23,6 +23,18 @@ keywords: "API, Docker, rcli, REST, documentation"
|
||||
configuration is only used for Windows containers.
|
||||
* `GET /containers/(name)/logs` now supports an additional query parameter: `until`,
|
||||
which returns log lines that occurred before the specified timestamp.
|
||||
* `POST /containers/{id}/exec` now accepts a `WorkingDir` property to set the
|
||||
work-dir for the exec process, independent of the container's work-dir.
|
||||
* `Get /version` now returns a `Platform.Name` field, which can be used by products
|
||||
using Moby as a foundation to return information about the platform.
|
||||
* `Get /version` now returns a `Components` field, which can be used to return
|
||||
information about the components used. Information about the engine itself is
|
||||
now included as a "Component" version, and contains all information from the
|
||||
top-level `Version`, `GitCommit`, `APIVersion`, `MinAPIVersion`, `GoVersion`,
|
||||
`Os`, `Arch`, `BuildTime`, `KernelVersion`, and `Experimental` fields. Going
|
||||
forward, the information from the `Components` section is preferred over their
|
||||
top-level counterparts.
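A minimal sketch of reading the new fields from the Go client, assuming the `client` package and `types.Version` struct in this release expose `Platform.Name` and `Components` as described above; error handling is kept deliberately terse:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// An older daemon will simply not populate the new fields; this sketch
	// assumes API v1.35+ as documented above.
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	v, err := cli.ServerVersion(context.Background())
	if err != nil {
		panic(err)
	}

	fmt.Println("platform:", v.Platform.Name)
	for _, c := range v.Components {
		// Each component reports its own version, e.g. the engine or containerd.
		fmt.Printf("%s: %s\n", c.Name, c.Version)
	}
}
```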
|
||||
|
||||
|
||||
## v1.34 API changes
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ TOMLV_COMMIT=9baf8a8a9f2ed20a8e54160840c492f937eeaf9a
|
||||
|
||||
# When updating RUNC_COMMIT, also update runc in vendor.conf accordingly
|
||||
RUNC_COMMIT=b2567b37d7b75eb4cf325b77297b140ea686ce8f
|
||||
CONTAINERD_COMMIT=v1.0.0
|
||||
CONTAINERD_COMMIT=89623f28b87a6004d4b785663257362d1658a729 # v1.0.0
|
||||
TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574
|
||||
LIBNETWORK_COMMIT=7b2b1feb1de4817d522cc372af149ff48d25028e
|
||||
VNDR_COMMIT=a6e196d8b4b0cbbdc29aebdb20c59ac6926bb384
|
||||
|
||||
@@ -1,13 +1,10 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/integration-cli/checker"
|
||||
"github.com/docker/docker/integration-cli/request"
|
||||
@@ -48,25 +45,6 @@ func (s *DockerSuite) TestInfoAPI(c *check.C) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestInfoAPIRuncCommit tests that dockerd is able to obtain RunC version
|
||||
// information, and that the version matches the expected version
|
||||
func (s *DockerSuite) TestInfoAPIRuncCommit(c *check.C) {
|
||||
testRequires(c, DaemonIsLinux) // Windows does not have RunC version information
|
||||
|
||||
res, body, err := request.Get("/v1.30/info")
|
||||
c.Assert(res.StatusCode, checker.Equals, http.StatusOK)
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
b, err := request.ReadBody(body)
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
var i types.Info
|
||||
|
||||
c.Assert(json.Unmarshal(b, &i), checker.IsNil)
|
||||
c.Assert(i.RuncCommit.ID, checker.Not(checker.Equals), "N/A")
|
||||
c.Assert(i.RuncCommit.ID, checker.Equals, i.RuncCommit.Expected)
|
||||
}
|
||||
|
||||
func (s *DockerSuite) TestInfoAPIVersioned(c *check.C) {
|
||||
testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later
|
||||
|
||||
|
||||
@@ -4211,6 +4211,7 @@ func (s *DockerTrustSuite) TestBuildContextDirIsSymlink(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *DockerTrustSuite) TestTrustedBuildTagFromReleasesRole(c *check.C) {
|
||||
c.Skip("Blacklisting for Docker CE")
|
||||
testRequires(c, NotaryHosting)
|
||||
|
||||
latestTag := s.setupTrustedImage(c, "trusted-build-releases-role")
|
||||
@@ -4242,6 +4243,7 @@ func (s *DockerTrustSuite) TestTrustedBuildTagFromReleasesRole(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *DockerTrustSuite) TestTrustedBuildTagIgnoresOtherDelegationRoles(c *check.C) {
|
||||
c.Skip("Blacklisting for Docker CE")
|
||||
testRequires(c, NotaryHosting)
|
||||
|
||||
latestTag := s.setupTrustedImage(c, "trusted-build-releases-role")
|
||||
|
||||
@@ -268,7 +268,6 @@ func (s *DockerSuite) TestCreateByImageID(c *check.C) {
|
||||
|
||||
dockerCmd(c, "create", imageID)
|
||||
dockerCmd(c, "create", truncatedImageID)
|
||||
dockerCmd(c, "create", fmt.Sprintf("%s:%s", imageName, truncatedImageID))
|
||||
|
||||
// Ensure this fails
|
||||
out, exit, _ := dockerCmdWithError("create", fmt.Sprintf("%s:%s", imageName, imageID))
|
||||
@@ -280,7 +279,10 @@ func (s *DockerSuite) TestCreateByImageID(c *check.C) {
|
||||
c.Fatalf(`Expected %q in output; got: %s`, expected, out)
|
||||
}
|
||||
|
||||
out, exit, _ = dockerCmdWithError("create", fmt.Sprintf("%s:%s", "wrongimage", truncatedImageID))
|
||||
if i := strings.IndexRune(imageID, ':'); i >= 0 {
|
||||
imageID = imageID[i+1:]
|
||||
}
|
||||
out, exit, _ = dockerCmdWithError("create", fmt.Sprintf("%s:%s", "wrongimage", imageID))
|
||||
if exit == 0 {
|
||||
c.Fatalf("expected non-zero exit code; received %d", exit)
|
||||
}
|
||||
|
||||
@@ -1451,7 +1451,7 @@ func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *chec
|
||||
|
||||
// kill the container
|
||||
icmd.RunCommand(ctrBinary, "--address", "/var/run/docker/containerd/docker-containerd.sock",
|
||||
"--namespace", moby_daemon.MainNamespace, "tasks", "kill", id).Assert(c, icmd.Success)
|
||||
"--namespace", moby_daemon.ContainersNamespace, "tasks", "kill", id).Assert(c, icmd.Success)
|
||||
|
||||
// restart daemon.
|
||||
d.Restart(c)
|
||||
@@ -2011,7 +2011,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithKilledRunningContainer(t *check
|
||||
|
||||
// kill the container
|
||||
icmd.RunCommand(ctrBinary, "--address", "/var/run/docker/containerd/docker-containerd.sock",
|
||||
"--namespace", moby_daemon.MainNamespace, "tasks", "kill", cid).Assert(t, icmd.Success)
|
||||
"--namespace", moby_daemon.ContainersNamespace, "tasks", "kill", cid).Assert(t, icmd.Success)
|
||||
|
||||
// Give time to containerd to process the command if we don't
|
||||
// the exit event might be received after we do the inspect
|
||||
@@ -2106,7 +2106,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithUnpausedRunningContainer(t *che
|
||||
result := icmd.RunCommand(
|
||||
ctrBinary,
|
||||
"--address", "/var/run/docker/containerd/docker-containerd.sock",
|
||||
"--namespace", moby_daemon.MainNamespace,
|
||||
"--namespace", moby_daemon.ContainersNamespace,
|
||||
"tasks", "resume", cid)
|
||||
result.Assert(t, icmd.Success)
|
||||
|
||||
|
||||
@@ -13,9 +13,7 @@ import (
|
||||
)
|
||||
|
||||
func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithExternalAuth(c *check.C) {
|
||||
|
||||
// @TODO TestLogoutWithExternalAuth expects docker to fall back to a v1 registry, so has to be updated for v17.12, when v1 registries are no longer supported
|
||||
s.d.StartWithBusybox(c, "--disable-legacy-registry=false")
|
||||
s.d.StartWithBusybox(c)
|
||||
|
||||
osPath := os.Getenv("PATH")
|
||||
defer os.Setenv("PATH", osPath)
|
||||
@@ -62,7 +60,7 @@ func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithExternalAuth(c *check.C)
|
||||
// check I cannot pull anymore
|
||||
out, err := s.d.Cmd("--config", tmp, "pull", repoName)
|
||||
c.Assert(err, check.NotNil, check.Commentf(out))
|
||||
c.Assert(out, checker.Contains, "Error: image dockercli/busybox:authtest not found")
|
||||
c.Assert(out, checker.Contains, "no basic auth credentials")
|
||||
}
|
||||
|
||||
// #23100
|
||||
|
||||
@@ -259,18 +259,6 @@ func (s *DockerHubPullSuite) TestPullClientDisconnect(c *check.C) {
|
||||
c.Assert(err, checker.NotNil, check.Commentf("image was pulled after client disconnected"))
|
||||
}
|
||||
|
||||
func (s *DockerRegistryAuthHtpasswdSuite) TestPullNoCredentialsNotFound(c *check.C) {
|
||||
// @TODO TestPullNoCredentialsNotFound expects docker to fall back to a v1 registry, so has to be updated for v17.12, when v1 registries are no longer supported
|
||||
s.d.StartWithBusybox(c, "--disable-legacy-registry=false")
|
||||
|
||||
// we don't care about the actual image, we just want to see image not found
|
||||
// because that means v2 call returned 401 and we fell back to v1 which usually
|
||||
// gives a 404 (in this case the test registry doesn't handle v1 at all)
|
||||
out, err := s.d.Cmd("pull", privateRegistryURL+"/busybox")
|
||||
c.Assert(err, check.NotNil, check.Commentf(out))
|
||||
c.Assert(out, checker.Contains, "Error: image busybox:latest not found")
|
||||
}
|
||||
|
||||
// Regression test for https://github.com/docker/docker/issues/26429
|
||||
func (s *DockerSuite) TestPullLinuxImageFailsOnWindows(c *check.C) {
|
||||
testRequires(c, DaemonIsWindows, Network)
|
||||
|
||||
@@ -133,6 +133,7 @@ func (s *DockerTrustSuite) TestTrustedPullDelete(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *DockerTrustSuite) TestTrustedPullReadsFromReleasesRole(c *check.C) {
|
||||
c.Skip("Blacklisting for Docker CE")
|
||||
testRequires(c, NotaryHosting)
|
||||
repoName := fmt.Sprintf("%v/dockerclireleasesdelegationpulling/trusted", privateRegistryURL)
|
||||
targetName := fmt.Sprintf("%s:latest", repoName)
|
||||
@@ -188,6 +189,7 @@ func (s *DockerTrustSuite) TestTrustedPullReadsFromReleasesRole(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *DockerTrustSuite) TestTrustedPullIgnoresOtherDelegationRoles(c *check.C) {
|
||||
c.Skip("Blacklisting for Docker CE")
|
||||
testRequires(c, NotaryHosting)
|
||||
repoName := fmt.Sprintf("%v/dockerclipullotherdelegation/trusted", privateRegistryURL)
|
||||
targetName := fmt.Sprintf("%s:latest", repoName)
|
||||
|
||||
@@ -282,6 +282,7 @@ func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c
|
||||
}
|
||||
|
||||
func (s *DockerTrustSuite) TestTrustedPush(c *check.C) {
|
||||
c.Skip("Blacklisting for Docker CE")
|
||||
repoName := fmt.Sprintf("%v/dockerclitrusted/pushtest:latest", privateRegistryURL)
|
||||
// tag the image and upload it to the private registry
|
||||
cli.DockerCmd(c, "tag", "busybox", repoName)
|
||||
@@ -366,6 +367,7 @@ func (s *DockerTrustSuite) TestTrustedPushWithExistingSignedTag(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *DockerTrustSuite) TestTrustedPushWithIncorrectPassphraseForNonRoot(c *check.C) {
|
||||
c.Skip("Blacklisting for Docker CE")
|
||||
repoName := fmt.Sprintf("%v/dockercliincorretpwd/trusted:latest", privateRegistryURL)
|
||||
// tag the image and upload it to the private registry
|
||||
cli.DockerCmd(c, "tag", "busybox", repoName)
|
||||
|
||||
@@ -1,13 +1,10 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/integration-cli/checker"
|
||||
"github.com/docker/docker/integration-cli/cli/build"
|
||||
"github.com/docker/docker/internal/testutil"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/go-check/check"
|
||||
)
|
||||
|
||||
@@ -140,29 +137,3 @@ func (s *DockerSuite) TestTagInvalidRepoName(c *check.C) {
|
||||
c.Fatal("tagging with image named \"sha256\" should have failed")
|
||||
}
|
||||
}
|
||||
|
||||
// ensure tags cannot create ambiguity with image ids
|
||||
func (s *DockerSuite) TestTagTruncationAmbiguity(c *check.C) {
|
||||
buildImageSuccessfully(c, "notbusybox:latest", build.WithDockerfile(`FROM busybox
|
||||
MAINTAINER dockerio`))
|
||||
imageID := getIDByName(c, "notbusybox:latest")
|
||||
truncatedImageID := stringid.TruncateID(imageID)
|
||||
truncatedTag := fmt.Sprintf("notbusybox:%s", truncatedImageID)
|
||||
|
||||
id := inspectField(c, truncatedTag, "Id")
|
||||
|
||||
// Ensure inspect by image id returns image for image id
|
||||
c.Assert(id, checker.Equals, imageID)
|
||||
c.Logf("Built image: %s", imageID)
|
||||
|
||||
// test setting tag fails
|
||||
_, _, err := dockerCmdWithError("tag", "busybox:latest", truncatedTag)
|
||||
if err != nil {
|
||||
c.Fatalf("Error tagging with an image id: %s", err)
|
||||
}
|
||||
|
||||
id = inspectField(c, truncatedTag, "Id")
|
||||
|
||||
// Ensure id is imageID and not busybox:latest
|
||||
c.Assert(id, checker.Not(checker.Equals), imageID)
|
||||
}
|
||||
|
||||
@@ -22,7 +22,7 @@ func makefile(path string, contents string) (string, error) {
|
||||
return f.Name(), nil
|
||||
}
|
||||
|
||||
// TestV2Only ensures that a daemon by default does not
|
||||
// TestV2Only ensures that a daemon does not
|
||||
// attempt to contact any v1 registry endpoints.
|
||||
func (s *DockerRegistrySuite) TestV2Only(c *check.C) {
|
||||
reg, err := registry.NewMock(c)
|
||||
@@ -56,65 +56,3 @@ func (s *DockerRegistrySuite) TestV2Only(c *check.C) {
|
||||
s.d.Cmd("push", repoName)
|
||||
s.d.Cmd("pull", repoName)
|
||||
}
|
||||
|
||||
// TestV1 starts a daemon with legacy registries enabled
|
||||
// and ensure v1 endpoints are hit for the following operations:
|
||||
// login, push, pull, build & run
|
||||
func (s *DockerRegistrySuite) TestV1(c *check.C) {
|
||||
reg, err := registry.NewMock(c)
|
||||
defer reg.Close()
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
v2Pings := 0
|
||||
reg.RegisterHandler("/v2/", func(w http.ResponseWriter, r *http.Request) {
|
||||
v2Pings++
|
||||
// V2 ping 404 causes fallback to v1
|
||||
w.WriteHeader(404)
|
||||
})
|
||||
|
||||
v1Pings := 0
|
||||
reg.RegisterHandler("/v1/_ping", func(w http.ResponseWriter, r *http.Request) {
|
||||
v1Pings++
|
||||
})
|
||||
|
||||
v1Logins := 0
|
||||
reg.RegisterHandler("/v1/users/", func(w http.ResponseWriter, r *http.Request) {
|
||||
v1Logins++
|
||||
})
|
||||
|
||||
v1Repo := 0
|
||||
reg.RegisterHandler("/v1/repositories/busybox/", func(w http.ResponseWriter, r *http.Request) {
|
||||
v1Repo++
|
||||
})
|
||||
|
||||
reg.RegisterHandler("/v1/repositories/busybox/images", func(w http.ResponseWriter, r *http.Request) {
|
||||
v1Repo++
|
||||
})
|
||||
|
||||
s.d.Start(c, "--insecure-registry", reg.URL(), "--disable-legacy-registry=false")
|
||||
|
||||
tmp, err := ioutil.TempDir("", "integration-cli-")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
dockerfileName, err := makefile(tmp, fmt.Sprintf("FROM %s/busybox", reg.URL()))
|
||||
c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile"))
|
||||
|
||||
s.d.Cmd("build", "--file", dockerfileName, tmp)
|
||||
c.Assert(v1Repo, check.Equals, 1, check.Commentf("Expected v1 repository access after build"))
|
||||
|
||||
repoName := fmt.Sprintf("%s/busybox", reg.URL())
|
||||
s.d.Cmd("run", repoName)
|
||||
c.Assert(v1Repo, check.Equals, 2, check.Commentf("Expected v1 repository access after run"))
|
||||
|
||||
s.d.Cmd("login", "-u", "richard", "-p", "testtest", reg.URL())
|
||||
c.Assert(v1Logins, check.Equals, 1, check.Commentf("Expected v1 login attempt"))
|
||||
|
||||
s.d.Cmd("tag", "busybox", repoName)
|
||||
s.d.Cmd("push", repoName)
|
||||
|
||||
c.Assert(v1Repo, check.Equals, 2)
|
||||
|
||||
s.d.Cmd("pull", repoName)
|
||||
c.Assert(v1Repo, check.Equals, 3, check.Commentf("Expected v1 repository access after pull"))
|
||||
}
|
||||
|
||||
@@ -41,7 +41,7 @@ const notaryHost = "localhost:4443"
|
||||
const notaryURL = "https://" + notaryHost
|
||||
|
||||
var SuccessTagging = icmd.Expected{
|
||||
Out: "Tagging",
|
||||
Err: "Tagging",
|
||||
}
|
||||
|
||||
var SuccessSigningAndPushing = icmd.Expected{
|
||||
|
||||
integration/container/restart_test.go (new file, 112 lines)
@@ -0,0 +1,112 @@
|
||||
package container
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/integration-cli/daemon"
|
||||
)
|
||||
|
||||
func TestDaemonRestartKillContainers(t *testing.T) {
|
||||
type testCase struct {
|
||||
desc string
|
||||
config *container.Config
|
||||
hostConfig *container.HostConfig
|
||||
|
||||
xRunning bool
|
||||
xRunningLiveRestore bool
|
||||
}
|
||||
|
||||
for _, c := range []testCase{
|
||||
{
|
||||
desc: "container without restart policy",
|
||||
config: &container.Config{Image: "busybox", Cmd: []string{"top"}},
|
||||
xRunningLiveRestore: true,
|
||||
},
|
||||
{
|
||||
desc: "container with restart=always",
|
||||
config: &container.Config{Image: "busybox", Cmd: []string{"top"}},
|
||||
hostConfig: &container.HostConfig{RestartPolicy: container.RestartPolicy{Name: "always"}},
|
||||
xRunning: true,
|
||||
xRunningLiveRestore: true,
|
||||
},
|
||||
} {
|
||||
for _, liveRestoreEnabled := range []bool{false, true} {
|
||||
for fnName, stopDaemon := range map[string]func(*testing.T, *daemon.Daemon){
|
||||
"kill-daemon": func(t *testing.T, d *daemon.Daemon) {
|
||||
if err := d.Kill(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
},
|
||||
"stop-daemon": func(t *testing.T, d *daemon.Daemon) {
|
||||
d.Stop(t)
|
||||
},
|
||||
} {
|
||||
t.Run(fmt.Sprintf("live-restore=%v/%s/%s", liveRestoreEnabled, c.desc, fnName), func(t *testing.T) {
|
||||
c := c
|
||||
liveRestoreEnabled := liveRestoreEnabled
|
||||
stopDaemon := stopDaemon
|
||||
|
||||
t.Parallel()
|
||||
|
||||
d := daemon.New(t, "", "dockerd", daemon.Config{})
|
||||
client, err := d.NewClient()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var args []string
|
||||
if liveRestoreEnabled {
|
||||
args = []string{"--live-restore"}
|
||||
}
|
||||
|
||||
d.StartWithBusybox(t, args...)
|
||||
defer d.Stop(t)
|
||||
ctx := context.Background()
|
||||
|
||||
resp, err := client.ContainerCreate(ctx, c.config, c.hostConfig, nil, "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer client.ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{Force: true})
|
||||
|
||||
if err := client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stopDaemon(t, d)
|
||||
d.Start(t, args...)
|
||||
|
||||
expected := c.xRunning
|
||||
if liveRestoreEnabled {
|
||||
expected = c.xRunningLiveRestore
|
||||
}
|
||||
|
||||
var running bool
|
||||
for i := 0; i < 30; i++ {
|
||||
inspect, err := client.ContainerInspect(ctx, resp.ID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
running = inspect.State.Running
|
||||
if running == expected {
|
||||
break
|
||||
}
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
}
|
||||
|
||||
if running != expected {
|
||||
t.Fatalf("got unexpected running state, expected %v, got: %v", expected, running)
|
||||
}
|
||||
// TODO(cpuguy83): test pause states... this seems to be rather undefined currently
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
integration/system/info_linux_test.go (new file, 34 lines)
@@ -0,0 +1,34 @@
|
||||
// +build !windows
|
||||
|
||||
package system
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/integration/util/request"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func TestInfo_BinaryCommits(t *testing.T) {
|
||||
client := request.NewAPIClient(t)
|
||||
|
||||
info, err := client.Info(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.NotNil(t, info.ContainerdCommit)
|
||||
assert.NotEqual(t, "N/A", info.ContainerdCommit.ID)
|
||||
assert.Equal(t, testEnv.DaemonInfo.ContainerdCommit.Expected, info.ContainerdCommit.Expected)
|
||||
assert.Equal(t, info.ContainerdCommit.Expected, info.ContainerdCommit.ID)
|
||||
|
||||
assert.NotNil(t, info.InitCommit)
|
||||
assert.NotEqual(t, "N/A", info.InitCommit.ID)
|
||||
assert.Equal(t, testEnv.DaemonInfo.InitCommit.Expected, info.InitCommit.Expected)
|
||||
assert.Equal(t, info.InitCommit.Expected, info.InitCommit.ID)
|
||||
|
||||
assert.NotNil(t, info.RuncCommit)
|
||||
assert.NotEqual(t, "N/A", info.RuncCommit.ID)
|
||||
assert.Equal(t, testEnv.DaemonInfo.RuncCommit.Expected, info.RuncCommit.Expected)
|
||||
assert.Equal(t, info.RuncCommit.Expected, info.RuncCommit.ID)
|
||||
}
|
||||
@@ -27,6 +27,7 @@ import (
|
||||
"github.com/containerd/containerd/archive"
|
||||
"github.com/containerd/containerd/cio"
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/linux/runctypes"
|
||||
"github.com/containerd/typeurl"
|
||||
@@ -42,7 +43,7 @@ import (
|
||||
const InitProcessName = "init"
|
||||
|
||||
type container struct {
|
||||
sync.Mutex
|
||||
mu sync.Mutex
|
||||
|
||||
bundleDir string
|
||||
ctr containerd.Container
|
||||
@@ -51,6 +52,54 @@ type container struct {
|
||||
oomKilled bool
|
||||
}
|
||||
|
||||
func (c *container) setTask(t containerd.Task) {
|
||||
c.mu.Lock()
|
||||
c.task = t
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *container) getTask() containerd.Task {
|
||||
c.mu.Lock()
|
||||
t := c.task
|
||||
c.mu.Unlock()
|
||||
return t
|
||||
}
|
||||
|
||||
func (c *container) addProcess(id string, p containerd.Process) {
|
||||
c.mu.Lock()
|
||||
if c.execs == nil {
|
||||
c.execs = make(map[string]containerd.Process)
|
||||
}
|
||||
c.execs[id] = p
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *container) deleteProcess(id string) {
|
||||
c.mu.Lock()
|
||||
delete(c.execs, id)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *container) getProcess(id string) containerd.Process {
|
||||
c.mu.Lock()
|
||||
p := c.execs[id]
|
||||
c.mu.Unlock()
|
||||
return p
|
||||
}
|
||||
|
||||
func (c *container) setOOMKilled(killed bool) {
|
||||
c.mu.Lock()
|
||||
c.oomKilled = killed
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *container) getOOMKilled() bool {
|
||||
c.mu.Lock()
|
||||
killed := c.oomKilled
|
||||
c.mu.Unlock()
|
||||
return killed
|
||||
}
|
||||
|
||||
type client struct {
|
||||
sync.RWMutex // protects containers map
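The accessors above replace direct, caller-locked access to `container.task` and `container.execs`. A self-contained sketch of the same pattern with a stand-in task type, showing that concurrent callers stay race-free without ever touching the mutex themselves:

```go
// Standalone sketch: keep the mutex unexported and hold it only inside small
// getters/setters, so callers cannot forget to lock or hold the lock across a
// blocking call. The "task" type is a stand-in, not the real containerd.Task.
package main

import (
	"fmt"
	"sync"
)

type task struct{ pid int }

type container struct {
	mu   sync.Mutex
	task *task
}

func (c *container) setTask(t *task) {
	c.mu.Lock()
	c.task = t
	c.mu.Unlock()
}

func (c *container) getTask() *task {
	c.mu.Lock()
	t := c.task
	c.mu.Unlock()
	return t
}

func main() {
	c := &container{}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(pid int) {
			defer wg.Done()
			c.setTask(&task{pid: pid})
			_ = c.getTask() // safe concurrent read
		}(i)
	}
	wg.Wait()
	fmt.Println("final pid:", c.getTask().pid)
}
```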
|
||||
|
||||
@@ -160,10 +209,10 @@ func (c *client) Create(ctx context.Context, id string, ociSpec *specs.Spec, run
|
||||
// Start create and start a task for the specified containerd id
|
||||
func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin bool, attachStdio StdioCallback) (int, error) {
|
||||
ctr := c.getContainer(id)
|
||||
switch {
|
||||
case ctr == nil:
|
||||
if ctr == nil {
|
||||
return -1, errors.WithStack(newNotFoundError("no such container"))
|
||||
case ctr.task != nil:
|
||||
}
|
||||
if t := ctr.getTask(); t != nil {
|
||||
return -1, errors.WithStack(newConflictError("container already started"))
|
||||
}
|
||||
|
||||
@@ -227,9 +276,7 @@ func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin
|
||||
return -1, err
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
c.containers[id].task = t
|
||||
c.Unlock()
|
||||
ctr.setTask(t)
|
||||
|
||||
// Signal c.createIO that it can call CloseIO
|
||||
close(stdinCloseSync)
|
||||
@@ -239,9 +286,7 @@ func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin
|
||||
c.logger.WithError(err).WithField("container", id).
|
||||
Error("failed to delete task after fail start")
|
||||
}
|
||||
c.Lock()
|
||||
c.containers[id].task = nil
|
||||
c.Unlock()
|
||||
ctr.setTask(nil)
|
||||
return -1, err
|
||||
}
|
||||
|
||||
@@ -250,12 +295,15 @@ func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin
|
||||
|
||||
func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio StdioCallback) (int, error) {
|
||||
ctr := c.getContainer(containerID)
|
||||
switch {
|
||||
case ctr == nil:
|
||||
if ctr == nil {
|
||||
return -1, errors.WithStack(newNotFoundError("no such container"))
|
||||
case ctr.task == nil:
|
||||
}
|
||||
t := ctr.getTask()
|
||||
if t == nil {
|
||||
return -1, errors.WithStack(newInvalidParameterError("container is not running"))
|
||||
case ctr.execs != nil && ctr.execs[processID] != nil:
|
||||
}
|
||||
|
||||
if p := ctr.getProcess(processID); p != nil {
|
||||
return -1, errors.WithStack(newConflictError("id already in use"))
|
||||
}
|
||||
|
||||
@@ -278,7 +326,7 @@ func (c *client) Exec(ctx context.Context, containerID, processID string, spec *
|
||||
}
|
||||
}()
|
||||
|
||||
p, err = ctr.task.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
|
||||
p, err = t.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
|
||||
rio, err = c.createIO(fifos, containerID, processID, stdinCloseSync, attachStdio)
|
||||
return rio, err
|
||||
})
|
||||
@@ -291,21 +339,14 @@ func (c *client) Exec(ctx context.Context, containerID, processID string, spec *
|
||||
return -1, err
|
||||
}
|
||||
|
||||
ctr.Lock()
|
||||
if ctr.execs == nil {
|
||||
ctr.execs = make(map[string]containerd.Process)
|
||||
}
|
||||
ctr.execs[processID] = p
|
||||
ctr.Unlock()
|
||||
ctr.addProcess(processID, p)
|
||||
|
||||
// Signal c.createIO that it can call CloseIO
|
||||
close(stdinCloseSync)
|
||||
|
||||
if err = p.Start(ctx); err != nil {
|
||||
p.Delete(context.Background())
|
||||
ctr.Lock()
|
||||
delete(ctr.execs, processID)
|
||||
ctr.Unlock()
|
||||
ctr.deleteProcess(processID)
|
||||
return -1, err
|
||||
}
|
||||
|
||||
@@ -317,7 +358,7 @@ func (c *client) SignalProcess(ctx context.Context, containerID, processID strin
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return p.Kill(ctx, syscall.Signal(signal))
|
||||
return wrapError(p.Kill(ctx, syscall.Signal(signal)))
|
||||
}
|
||||
|
||||
func (c *client) ResizeTerminal(ctx context.Context, containerID, processID string, width, height int) error {
|
||||
@@ -431,12 +472,9 @@ func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, ti
|
||||
return 255, time.Now(), nil
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
if ctr, ok := c.containers[containerID]; ok {
|
||||
ctr.task = nil
|
||||
if ctr := c.getContainer(containerID); ctr != nil {
|
||||
ctr.setTask(nil)
|
||||
}
|
||||
c.Unlock()
|
||||
|
||||
return status.ExitCode(), status.ExitTime(), nil
|
||||
}
|
||||
|
||||
@@ -470,7 +508,12 @@ func (c *client) Status(ctx context.Context, containerID string) (Status, error)
|
||||
return StatusUnknown, errors.WithStack(newNotFoundError("no such container"))
|
||||
}
|
||||
|
||||
s, err := ctr.task.Status(ctx)
|
||||
t := ctr.getTask()
|
||||
if t == nil {
|
||||
return StatusUnknown, errors.WithStack(newNotFoundError("no such task"))
|
||||
}
|
||||
|
||||
s, err := t.Status(ctx)
|
||||
if err != nil {
|
||||
return StatusUnknown, err
|
||||
}
|
||||
@@ -546,26 +589,22 @@ func (c *client) removeContainer(id string) {
|
||||
|
||||
func (c *client) getProcess(containerID, processID string) (containerd.Process, error) {
|
||||
ctr := c.getContainer(containerID)
|
||||
switch {
|
||||
case ctr == nil:
|
||||
if ctr == nil {
|
||||
return nil, errors.WithStack(newNotFoundError("no such container"))
|
||||
case ctr.task == nil:
|
||||
return nil, errors.WithStack(newNotFoundError("container is not running"))
|
||||
case processID == InitProcessName:
|
||||
return ctr.task, nil
|
||||
default:
|
||||
ctr.Lock()
|
||||
defer ctr.Unlock()
|
||||
if ctr.execs == nil {
|
||||
return nil, errors.WithStack(newNotFoundError("no execs"))
|
||||
}
|
||||
}
|
||||
|
||||
p := ctr.execs[processID]
|
||||
t := ctr.getTask()
|
||||
if t == nil {
|
||||
return nil, errors.WithStack(newNotFoundError("container is not running"))
|
||||
}
|
||||
if processID == InitProcessName {
|
||||
return t, nil
|
||||
}
|
||||
|
||||
p := ctr.getProcess(processID)
|
||||
if p == nil {
|
||||
return nil, errors.WithStack(newNotFoundError("no such exec"))
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
@@ -623,12 +662,7 @@ func (c *client) processEvent(ctr *container, et EventType, ei EventInfo) {
|
||||
}
|
||||
|
||||
if et == EventExit && ei.ProcessID != ei.ContainerID {
|
||||
var p containerd.Process
|
||||
ctr.Lock()
|
||||
if ctr.execs != nil {
|
||||
p = ctr.execs[ei.ProcessID]
|
||||
}
|
||||
ctr.Unlock()
|
||||
p := ctr.getProcess(ei.ProcessID)
|
||||
if p == nil {
|
||||
c.logger.WithError(errors.New("no such process")).
|
||||
WithFields(logrus.Fields{
|
||||
@@ -644,9 +678,8 @@ func (c *client) processEvent(ctr *container, et EventType, ei EventInfo) {
|
||||
"process": ei.ProcessID,
|
||||
}).Warn("failed to delete process")
|
||||
}
|
||||
c.Lock()
|
||||
delete(ctr.execs, ei.ProcessID)
|
||||
c.Unlock()
|
||||
ctr.deleteProcess(ei.ProcessID)
|
||||
|
||||
ctr := c.getContainer(ei.ContainerID)
|
||||
if ctr == nil {
|
||||
c.logger.WithFields(logrus.Fields{
|
||||
@@ -783,10 +816,10 @@ func (c *client) processEventStream(ctx context.Context) {
|
||||
}
|
||||
|
||||
if oomKilled {
|
||||
ctr.oomKilled = true
|
||||
ctr.setOOMKilled(true)
|
||||
oomKilled = false
|
||||
}
|
||||
ei.OOMKilled = ctr.oomKilled
|
||||
ei.OOMKilled = ctr.getOOMKilled()
|
||||
|
||||
c.processEvent(ctr, et, ei)
|
||||
}
|
||||
@@ -816,12 +849,19 @@ func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.R
|
||||
}
|
||||
|
||||
func wrapError(err error) error {
|
||||
if err != nil {
|
||||
msg := err.Error()
|
||||
for _, s := range []string{"container does not exist", "not found", "no such container"} {
|
||||
if strings.Contains(msg, s) {
|
||||
return wrapNotFoundError(err)
|
||||
}
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch {
|
||||
case errdefs.IsNotFound(err):
|
||||
return wrapNotFoundError(err)
|
||||
}
|
||||
|
||||
msg := err.Error()
|
||||
for _, s := range []string{"container does not exist", "not found", "no such container"} {
|
||||
if strings.Contains(msg, s) {
|
||||
return wrapNotFoundError(err)
|
||||
}
|
||||
}
|
||||
return err
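A standalone sketch of the classification order `wrapError` settles on above: try the typed `errdefs.IsNotFound` check first, then fall back to the legacy string matching. The `notFoundError` type is a local stand-in for the daemon's wrapped error, and the import assumes containerd's `errdefs` package as vendored here:

```go
package main

import (
	"errors"
	"fmt"
	"strings"

	"github.com/containerd/containerd/errdefs"
)

type notFoundError struct{ error }

func (notFoundError) NotFound() {}

func wrapError(err error) error {
	if err == nil {
		return nil
	}
	if errdefs.IsNotFound(err) { // typed check first
		return notFoundError{err}
	}
	// Fallback: some paths still surface plain error strings.
	msg := err.Error()
	for _, s := range []string{"container does not exist", "not found", "no such container"} {
		if strings.Contains(msg, s) {
			return notFoundError{err}
		}
	}
	return err
}

func main() {
	fmt.Println(wrapError(errors.New("no such container: abc"))) // wrapped as not-found
	fmt.Println(wrapError(errors.New("permission denied")))      // returned unchanged
}
```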
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
package tarsum
|
||||
|
||||
import "sort"
|
||||
import (
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// FileInfoSumInterface provides an interface for accessing file checksum
|
||||
// information within a tar file. This info is accessed through interface
|
||||
@@ -35,8 +39,11 @@ type FileInfoSums []FileInfoSumInterface
|
||||
|
||||
// GetFile returns the first FileInfoSumInterface with a matching name.
|
||||
func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface {
|
||||
// We do case insensitive matching on Windows as c:\APP and c:\app are
|
||||
// the same. See issue #33107.
|
||||
for i := range fis {
|
||||
if fis[i].Name() == name {
|
||||
if (runtime.GOOS == "windows" && strings.EqualFold(fis[i].Name(), name)) ||
|
||||
(runtime.GOOS != "windows" && fis[i].Name() == name) {
|
||||
return fis[i]
|
||||
}
|
||||
}
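A tiny sketch isolating the comparison added above, using only the standard library; the sample paths are made up:

```go
// Sketch of the Windows-only case-insensitive name match (issue #33107).
package main

import (
	"fmt"
	"runtime"
	"strings"
)

func sameName(a, b string) bool {
	if runtime.GOOS == "windows" {
		// c:\APP and c:\app refer to the same path on Windows.
		return strings.EqualFold(a, b)
	}
	return a == b
}

func main() {
	fmt.Println(sameName(`c:\APP\file.txt`, `c:\app\file.txt`)) // true on windows, false elsewhere
	fmt.Println(sameName("etc/hosts", "etc/hosts"))             // always true
}
```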
|
||||
|
||||
@@ -16,7 +16,7 @@ import (
|
||||
)
|
||||
|
||||
// PluginNamespace is the name used for the plugins namespace
|
||||
var PluginNamespace = "moby-plugins"
|
||||
var PluginNamespace = "plugins.moby"
|
||||
|
||||
// ExitHandler represents an object that is called when the exit event is received from containerd
|
||||
type ExitHandler interface {
|
||||
|
||||
vendor.conf (10 lines changed)
@@ -30,7 +30,7 @@ github.com/moby/buildkit aaff9d591ef128560018433fe61beb802e149de8
|
||||
github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2
|
||||
|
||||
#get libnetwork packages
|
||||
github.com/docker/libnetwork 9bca9a4a220b158cc94402e0f8c2c7714eb6f503
|
||||
github.com/docker/libnetwork 7dd202d11b2078c063fefc614c9fb9e4615da0e1
|
||||
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
|
||||
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
|
||||
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
|
||||
@@ -77,7 +77,7 @@ github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
|
||||
github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4
|
||||
|
||||
# gelf logging driver deps
|
||||
github.com/Graylog2/go-gelf v2
|
||||
github.com/Graylog2/go-gelf 4143646226541087117ff2f83334ea48b3201841
|
||||
|
||||
github.com/fluent/fluent-logger-golang v1.3.0
|
||||
# fluent-logger-golang deps
|
||||
@@ -103,7 +103,7 @@ github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7
|
||||
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
|
||||
|
||||
# containerd
|
||||
github.com/containerd/containerd v1.0.0
|
||||
github.com/containerd/containerd 89623f28b87a6004d4b785663257362d1658a729 # v1.0.0
|
||||
github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6
|
||||
github.com/containerd/continuity 35d55c5e8dd23b32037d56cf97174aff3efdfa83
|
||||
github.com/containerd/cgroups 29da22c6171a4316169f9205ab6c49f59b5b852f
|
||||
@@ -114,7 +114,7 @@ github.com/dmcgowan/go-tar go1.10
|
||||
github.com/stevvooe/ttrpc 76e68349ad9ab4d03d764c713826d31216715e4f
|
||||
|
||||
# cluster
|
||||
github.com/docker/swarmkit 4429c763170d9ca96929249353c3270c19e7d39e
|
||||
github.com/docker/swarmkit 7598f7a937de4ad0a856012bd548009ceeb0d10e
|
||||
github.com/gogo/protobuf v0.4
|
||||
github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
|
||||
github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
|
||||
@@ -148,7 +148,7 @@ github.com/opencontainers/selinux b29023b86e4a69d1b46b7e7b4e2b6fda03f0b9cd
|
||||
# archive/tar
|
||||
# mkdir -p ./vendor/archive
|
||||
# git clone git://github.com/tonistiigi/go-1.git ./go
|
||||
# git --git-dir ./go/.git --work-tree ./go checkout revert-prefix-ignore
|
||||
# git --git-dir ./go/.git --work-tree ./go checkout revert-prefix-ignore-1.9
|
||||
# cp -a go/src/archive/tar ./vendor/archive/tar
|
||||
# rm -rf ./go
|
||||
# vndr
|
||||
|
||||
vendor/archive/tar/common.go (vendored, 20 lines changed)
@@ -158,11 +158,15 @@ func (fi headerFileInfo) Mode() (mode os.FileMode) {
|
||||
// sysStat, if non-nil, populates h from system-dependent fields of fi.
|
||||
var sysStat func(fi os.FileInfo, h *Header) error
|
||||
|
||||
// Mode constants from the tar spec.
|
||||
const (
|
||||
c_ISUID = 04000 // Set uid
|
||||
c_ISGID = 02000 // Set gid
|
||||
c_ISVTX = 01000 // Save text (sticky bit)
|
||||
// Mode constants from the USTAR spec:
|
||||
// See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
|
||||
c_ISUID = 04000 // Set uid
|
||||
c_ISGID = 02000 // Set gid
|
||||
c_ISVTX = 01000 // Save text (sticky bit)
|
||||
|
||||
// Common Unix mode constants; these are not defined in any common tar standard.
|
||||
// Header.FileInfo understands these, but FileInfoHeader will never produce these.
|
||||
c_ISDIR = 040000 // Directory
|
||||
c_ISFIFO = 010000 // FIFO
|
||||
c_ISREG = 0100000 // Regular file
|
||||
@@ -208,30 +212,24 @@ func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
|
||||
}
|
||||
switch {
|
||||
case fm.IsRegular():
|
||||
h.Mode |= c_ISREG
|
||||
h.Typeflag = TypeReg
|
||||
h.Size = fi.Size()
|
||||
case fi.IsDir():
|
||||
h.Typeflag = TypeDir
|
||||
h.Mode |= c_ISDIR
|
||||
h.Name += "/"
|
||||
case fm&os.ModeSymlink != 0:
|
||||
h.Typeflag = TypeSymlink
|
||||
h.Mode |= c_ISLNK
|
||||
h.Linkname = link
|
||||
case fm&os.ModeDevice != 0:
|
||||
if fm&os.ModeCharDevice != 0 {
|
||||
h.Mode |= c_ISCHR
|
||||
h.Typeflag = TypeChar
|
||||
} else {
|
||||
h.Mode |= c_ISBLK
|
||||
h.Typeflag = TypeBlock
|
||||
}
|
||||
case fm&os.ModeNamedPipe != 0:
|
||||
h.Typeflag = TypeFifo
|
||||
h.Mode |= c_ISFIFO
|
||||
case fm&os.ModeSocket != 0:
|
||||
h.Mode |= c_ISSOCK
|
||||
return nil, fmt.Errorf("archive/tar: sockets not supported")
|
||||
default:
|
||||
return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
|
||||
}
|
||||
|
||||
vendor/archive/tar/writer.go (vendored, 10 lines changed)
@@ -121,9 +121,15 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
|
||||
needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
|
||||
if needsPaxHeader {
|
||||
paxHeaders[paxKeyword] = s
|
||||
return
|
||||
}
|
||||
f.formatString(b, s)
|
||||
|
||||
// Write string in a best-effort manner to satisfy readers that expect
|
||||
// the field to be non-empty.
|
||||
s = toASCII(s)
|
||||
if len(s) > len(b) {
|
||||
s = s[:len(b)]
|
||||
}
|
||||
f.formatString(b, s) // Should never error
|
||||
}
|
||||
var formatNumeric = func(b []byte, x int64, paxKeyword string) {
|
||||
// Try octal first.
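A rough sketch of the "best effort" behaviour introduced above: when a value also goes into a PAX record, still copy a truncated ASCII version into the fixed-width USTAR field so pre-PAX readers see something non-empty. `toASCII` here is a simplified stand-in for the vendored helper:

```go
package main

import "fmt"

// toASCII drops any non-ASCII runes, mimicking the vendored helper.
func toASCII(s string) string {
	out := make([]byte, 0, len(s))
	for _, r := range s {
		if r < 0x80 {
			out = append(out, byte(r))
		}
	}
	return string(out)
}

// bestEffortField writes a truncated ASCII copy into a fixed-width field.
func bestEffortField(field []byte, s string) {
	s = toASCII(s)
	if len(s) > len(field) {
		s = s[:len(field)] // truncate to the fixed field width
	}
	copy(field, s)
}

func main() {
	var name [10]byte
	bestEffortField(name[:], "a-very-long-ünicode-name.txt")
	fmt.Printf("%q\n", string(name[:])) // "a-very-lon"
}
```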
|
||||
|
||||
vendor/github.com/Graylog2/go-gelf/gelf/tcpreader.go (generated, vendored, 97 lines changed)
@@ -5,6 +5,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
type TCPReader struct {
|
||||
@@ -13,16 +14,21 @@ type TCPReader struct {
|
||||
messages chan []byte
|
||||
}
|
||||
|
||||
func newTCPReader(addr string) (*TCPReader, chan string, error) {
|
||||
type connChannels struct {
|
||||
drop chan string
|
||||
confirm chan string
|
||||
}
|
||||
|
||||
func newTCPReader(addr string) (*TCPReader, chan string, chan string, error) {
|
||||
var err error
|
||||
tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("ResolveTCPAddr('%s'): %s", addr, err)
|
||||
return nil, nil, nil, fmt.Errorf("ResolveTCPAddr('%s'): %s", addr, err)
|
||||
}
|
||||
|
||||
listener, err := net.ListenTCP("tcp", tcpAddr)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("ListenTCP: %s", err)
|
||||
return nil, nil, nil, fmt.Errorf("ListenTCP: %s", err)
|
||||
}
|
||||
|
||||
r := &TCPReader{
|
||||
@@ -30,26 +36,61 @@ func newTCPReader(addr string) (*TCPReader, chan string, error) {
|
||||
messages: make(chan []byte, 100), // Make a buffered channel with at most 100 messages
|
||||
}
|
||||
|
||||
signal := make(chan string, 1)
|
||||
closeSignal := make(chan string, 1)
|
||||
doneSignal := make(chan string, 1)
|
||||
|
||||
go r.listenUntilCloseSignal(signal)
|
||||
go r.listenUntilCloseSignal(closeSignal, doneSignal)
|
||||
|
||||
return r, signal, nil
|
||||
return r, closeSignal, doneSignal, nil
|
||||
}
|
||||
|
||||
func (r *TCPReader) listenUntilCloseSignal(signal chan string) {
|
||||
defer func() { signal <- "done" }()
|
||||
defer r.listener.Close()
|
||||
func (r *TCPReader) accepter(connections chan net.Conn) {
|
||||
for {
|
||||
conn, err := r.listener.Accept()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
go handleConnection(conn, r.messages)
|
||||
connections <- conn
|
||||
}
|
||||
}
|
||||
|
||||
func (r *TCPReader) listenUntilCloseSignal(closeSignal chan string, doneSignal chan string) {
|
||||
defer func() { doneSignal <- "done" }()
|
||||
defer r.listener.Close()
|
||||
var conns []connChannels
|
||||
connectionsChannel := make(chan net.Conn, 1)
|
||||
go r.accepter(connectionsChannel)
|
||||
for {
|
||||
select {
|
||||
case sig := <-signal:
|
||||
if sig == "stop" {
|
||||
break
|
||||
case conn := <-connectionsChannel:
|
||||
dropSignal := make(chan string, 1)
|
||||
dropConfirm := make(chan string, 1)
|
||||
channels := connChannels{drop: dropSignal, confirm: dropConfirm}
|
||||
go handleConnection(conn, r.messages, dropSignal, dropConfirm)
|
||||
conns = append(conns, channels)
|
||||
default:
|
||||
}
|
||||
|
||||
select {
|
||||
case sig := <-closeSignal:
|
||||
if sig == "stop" || sig == "drop" {
|
||||
if len(conns) >= 1 {
|
||||
for _, s := range conns {
|
||||
if s.drop != nil {
|
||||
s.drop <- "drop"
|
||||
<-s.confirm
|
||||
conns = append(conns[:0], conns[1:]...)
|
||||
}
|
||||
}
|
||||
if sig == "stop" {
|
||||
return
|
||||
}
|
||||
} else if sig == "stop" {
|
||||
closeSignal <- "stop"
|
||||
}
|
||||
if sig == "drop" {
|
||||
doneSignal <- "done"
|
||||
}
|
||||
}
|
||||
default:
|
||||
}
|
||||
@@ -60,19 +101,41 @@ func (r *TCPReader) addr() string {
|
||||
return r.listener.Addr().String()
|
||||
}
|
||||
|
||||
func handleConnection(conn net.Conn, messages chan<- []byte) {
|
||||
func handleConnection(conn net.Conn, messages chan<- []byte, dropSignal chan string, dropConfirm chan string) {
|
||||
defer func() { dropConfirm <- "done" }()
|
||||
defer conn.Close()
|
||||
reader := bufio.NewReader(conn)
|
||||
|
||||
var b []byte
|
||||
var err error
|
||||
drop := false
|
||||
canDrop := false
|
||||
|
||||
for {
|
||||
conn.SetDeadline(time.Now().Add(2 * time.Second))
|
||||
if b, err = reader.ReadBytes(0); err != nil {
|
||||
continue
|
||||
}
|
||||
if len(b) > 0 {
|
||||
if drop {
|
||||
return
|
||||
}
|
||||
} else if len(b) > 0 {
|
||||
messages <- b
|
||||
canDrop = true
|
||||
if drop {
|
||||
return
|
||||
}
|
||||
} else if drop {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case sig := <-dropSignal:
|
||||
if sig == "drop" {
|
||||
drop = true
|
||||
time.Sleep(1 * time.Second)
|
||||
if canDrop {
|
||||
return
|
||||
}
|
||||
}
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
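A much-reduced sketch of the shutdown handshake this rewrite introduces: a close channel, a done channel, and an accept loop that feeds connections to the main select loop, which confirms shutdown only after it has stopped. Addresses and timeouts below are illustrative; only the standard library is used:

```go
package main

import (
	"bufio"
	"fmt"
	"net"
	"time"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}

	messages := make(chan []byte, 100)
	closeSignal := make(chan string, 1)
	doneSignal := make(chan string, 1)
	conns := make(chan net.Conn, 1)

	// accepter: hand accepted connections to the main loop.
	go func() {
		for {
			c, err := ln.Accept()
			if err != nil {
				return
			}
			conns <- c
		}
	}()

	// main loop: serve connections until asked to stop, then confirm.
	go func() {
		defer func() { doneSignal <- "done" }()
		defer ln.Close()
		for {
			select {
			case c := <-conns:
				go func(c net.Conn) {
					defer c.Close()
					r := bufio.NewReader(c)
					if b, err := r.ReadBytes(0); err == nil && len(b) > 0 {
						messages <- b
					}
				}(c)
			case <-closeSignal:
				return
			}
		}
	}()

	// Exercise it once, then shut down and wait for confirmation.
	c, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	c.Write([]byte("hello\x00"))
	c.Close()

	select {
	case m := <-messages:
		fmt.Printf("got %q\n", m[:len(m)-1])
	case <-time.After(2 * time.Second):
		fmt.Println("no message")
	}
	closeSignal <- "stop"
	<-doneSignal
	fmt.Println("reader stopped")
}
```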
|
||||
|
||||
vendor/github.com/Graylog2/go-gelf/gelf/tcpwriter.go (generated, vendored, 12 lines changed)
@@ -75,12 +75,17 @@ func (w *TCPWriter) Write(p []byte) (n int, err error) {
|
||||
|
||||
func (w *TCPWriter) writeToSocketWithReconnectAttempts(zBytes []byte) (n int, err error) {
|
||||
var errConn error
|
||||
var i int
|
||||
|
||||
w.mu.Lock()
|
||||
for i := 0; n <= w.MaxReconnect; i++ {
|
||||
for i = 0; i <= w.MaxReconnect; i++ {
|
||||
errConn = nil
|
||||
|
||||
n, err = w.conn.Write(zBytes)
|
||||
if w.conn != nil {
|
||||
n, err = w.conn.Write(zBytes)
|
||||
} else {
|
||||
err = fmt.Errorf("Connection was nil, will attempt reconnect")
|
||||
}
|
||||
if err != nil {
|
||||
time.Sleep(w.ReconnectDelay * time.Second)
|
||||
w.conn, errConn = net.Dial("tcp", w.addr)
|
||||
@@ -90,6 +95,9 @@ func (w *TCPWriter) writeToSocketWithReconnectAttempts(zBytes []byte) (n int, er
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
if i > w.MaxReconnect {
|
||||
return 0, fmt.Errorf("Maximum reconnection attempts was reached; giving up")
|
||||
}
|
||||
if errConn != nil {
|
||||
return 0, fmt.Errorf("Write Failed: %s\nReconnection failed: %s", err, errConn)
|
||||
}
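A condensed sketch of the corrected retry loop: iterate on the reconnect counter rather than on the byte count, tolerate a nil connection, and give up once the counter passes the maximum. The address, delay, and type names are placeholders:

```go
package main

import (
	"fmt"
	"net"
	"time"
)

type writer struct {
	conn           net.Conn
	addr           string
	maxReconnect   int
	reconnectDelay time.Duration
}

func (w *writer) write(p []byte) (int, error) {
	var (
		n   int
		err error
		i   int
	)
	for i = 0; i <= w.maxReconnect; i++ {
		if w.conn != nil {
			n, err = w.conn.Write(p)
		} else {
			err = fmt.Errorf("connection was nil, will attempt reconnect")
		}
		if err == nil {
			return n, nil
		}
		time.Sleep(w.reconnectDelay)
		w.conn, _ = net.Dial("tcp", w.addr) // may stay nil; next pass re-checks
	}
	if i > w.maxReconnect {
		return 0, fmt.Errorf("maximum reconnection attempts reached; giving up (last error: %v)", err)
	}
	return n, err
}

func main() {
	w := &writer{addr: "127.0.0.1:1", maxReconnect: 2, reconnectDelay: 10 * time.Millisecond}
	if _, err := w.write([]byte("hello")); err != nil {
		fmt.Println("write failed as expected:", err)
	}
}
```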
|
||||
|
||||
vendor/github.com/Graylog2/go-gelf/gelf/writer.go (generated, vendored, 3 lines changed)
@@ -27,5 +27,8 @@ type GelfWriter struct {
|
||||
|
||||
// Close connection and interrupt blocked Read or Write operations
|
||||
func (w *GelfWriter) Close() error {
|
||||
if w.conn == nil {
|
||||
return nil
|
||||
}
|
||||
return w.conn.Close()
|
||||
}
|
||||
|
||||
vendor/github.com/docker/libnetwork/diagnose/server.go (generated, vendored, 3 lines changed)
@@ -91,7 +91,7 @@ func (s *Server) EnableDebug(ip string, port int) {
|
||||
}
|
||||
|
||||
logrus.Infof("Starting the diagnose server listening on %d for commands", port)
|
||||
srv := &http.Server{Addr: fmt.Sprintf("127.0.0.1:%d", port), Handler: s}
|
||||
srv := &http.Server{Addr: fmt.Sprintf("%s:%d", ip, port), Handler: s}
|
||||
s.srv = srv
|
||||
s.enable = 1
|
||||
go func(n *Server) {
|
||||
@@ -101,7 +101,6 @@ func (s *Server) EnableDebug(ip string, port int) {
|
||||
atomic.SwapInt32(&n.enable, 0)
|
||||
}
|
||||
}(s)
|
||||
|
||||
}
|
||||
|
||||
// DisableDebug stop the dubug and closes the tcp socket
|
||||
|
||||
vendor/github.com/docker/libnetwork/networkdb/delegate.go (generated, vendored, 77 lines changed)
@@ -16,46 +16,28 @@ func (d *delegate) NodeMeta(limit int) []byte {
|
||||
return []byte{}
|
||||
}
|
||||
|
||||
// getNode searches the node inside the tables
|
||||
// returns true if the node was respectively in the active list, explicit node leave list or failed list
|
||||
func (nDB *NetworkDB) getNode(nEvent *NodeEvent, extract bool) (bool, bool, bool, *node) {
|
||||
var active bool
|
||||
var left bool
|
||||
var failed bool
|
||||
|
||||
for _, nodes := range []map[string]*node{
|
||||
nDB.failedNodes,
|
||||
nDB.leftNodes,
|
||||
nDB.nodes,
|
||||
} {
|
||||
if n, ok := nodes[nEvent.NodeName]; ok {
|
||||
active = &nodes == &nDB.nodes
|
||||
left = &nodes == &nDB.leftNodes
|
||||
failed = &nodes == &nDB.failedNodes
|
||||
if n.ltime >= nEvent.LTime {
|
||||
return active, left, failed, nil
|
||||
}
|
||||
if extract {
|
||||
delete(nodes, n.Name)
|
||||
}
|
||||
return active, left, failed, n
|
||||
}
|
||||
}
|
||||
return active, left, failed, nil
|
||||
}
|
||||
|
||||
func (nDB *NetworkDB) handleNodeEvent(nEvent *NodeEvent) bool {
|
||||
// Update our local clock if the received messages has newer
|
||||
// time.
|
||||
nDB.networkClock.Witness(nEvent.LTime)
|
||||
|
||||
nDB.RLock()
|
||||
active, left, _, n := nDB.getNode(nEvent, false)
|
||||
defer nDB.RUnlock()
|
||||
|
||||
// check if the node exists
|
||||
n, _, _ := nDB.findNode(nEvent.NodeName)
|
||||
if n == nil {
|
||||
nDB.RUnlock()
|
||||
return false
|
||||
}
|
||||
nDB.RUnlock()
|
||||
|
||||
// check if the event is fresh
|
||||
if n.ltime >= nEvent.LTime {
|
||||
return false
|
||||
}
|
||||
|
||||
// If we are here means that the event is fresher and the node is known. Update the laport time
|
||||
n.ltime = nEvent.LTime
|
||||
|
||||
// If it is a node leave event for a manager and this is the only manager we
|
||||
// know of we want the reconnect logic to kick in. In a single manager
|
||||
// cluster manager's gossip can't be bootstrapped unless some other node
|
||||
@@ -63,45 +45,32 @@ func (nDB *NetworkDB) handleNodeEvent(nEvent *NodeEvent) bool {
|
||||
if len(nDB.bootStrapIP) == 1 && nEvent.Type == NodeEventTypeLeave {
|
||||
for _, ip := range nDB.bootStrapIP {
|
||||
if ip.Equal(n.Addr) {
|
||||
n.ltime = nEvent.LTime
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
n.ltime = nEvent.LTime
|
||||
|
||||
switch nEvent.Type {
|
||||
case NodeEventTypeJoin:
|
||||
if active {
|
||||
// the node is already marked as active nothing to do
|
||||
moved, err := nDB.changeNodeState(n.Name, nodeActiveState)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("unable to find the node to move")
|
||||
return false
|
||||
}
|
||||
nDB.Lock()
|
||||
// Because the lock got released on the previous check we have to do it again and re verify the status of the node
|
||||
// All of this is to avoid a big lock on the function
|
||||
if active, _, _, n = nDB.getNode(nEvent, true); !active && n != nil {
|
||||
n.reapTime = 0
|
||||
nDB.nodes[n.Name] = n
|
||||
if moved {
|
||||
logrus.Infof("%v(%v): Node join event for %s/%s", nDB.config.Hostname, nDB.config.NodeID, n.Name, n.Addr)
|
||||
}
|
||||
nDB.Unlock()
|
||||
return true
|
||||
return moved
|
||||
case NodeEventTypeLeave:
|
||||
if left {
|
||||
// the node is already marked as left nothing to do.
|
||||
moved, err := nDB.changeNodeState(n.Name, nodeLeftState)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("unable to find the node to move")
|
||||
return false
|
||||
}
|
||||
nDB.Lock()
|
||||
// Because the lock got released on the previous check we have to do it again and re verify the status of the node
|
||||
// All of this is to avoid a big lock on the function
|
||||
if _, left, _, n = nDB.getNode(nEvent, true); !left && n != nil {
|
||||
n.reapTime = nodeReapInterval
|
||||
nDB.leftNodes[n.Name] = n
|
||||
if moved {
|
||||
logrus.Infof("%v(%v): Node leave event for %s/%s", nDB.config.Hostname, nDB.config.NodeID, n.Name, n.Addr)
|
||||
}
|
||||
nDB.Unlock()
|
||||
return true
|
||||
return moved
|
||||
}
|
||||
|
||||
return false
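The delegate changes above and the `findNode`/`changeNodeState` helpers they call converge on one model: a node lives in exactly one of three maps (active, left, failed), and a state change reports whether the node actually moved, so join/leave events are only logged on real transitions. A simplified, self-contained sketch of that bookkeeping, with stand-in types:

```go
package main

import (
	"errors"
	"fmt"
)

type nodeState int

const (
	nodeActiveState nodeState = iota
	nodeLeftState
	nodeFailedState
)

type node struct{ Name string }

type db struct {
	nodes  map[string]*node // active
	left   map[string]*node
	failed map[string]*node
}

// findNode reports the node, its current state, and the map that holds it.
func (d *db) findNode(name string) (*node, nodeState, map[string]*node) {
	for state, m := range map[nodeState]map[string]*node{
		nodeActiveState: d.nodes, nodeLeftState: d.left, nodeFailedState: d.failed,
	} {
		if n, ok := m[name]; ok {
			return n, state, m
		}
	}
	return nil, nodeActiveState, nil
}

// changeNodeState moves a node to the target state; it returns false when the
// node was already there, so callers can log only on real moves.
func (d *db) changeNodeState(name string, to nodeState) (bool, error) {
	n, from, src := d.findNode(name)
	if n == nil {
		return false, errors.New("node not found")
	}
	if from == to {
		return false, nil
	}
	delete(src, name)
	switch to {
	case nodeActiveState:
		d.nodes[name] = n
	case nodeLeftState:
		d.left[name] = n
	case nodeFailedState:
		d.failed[name] = n
	}
	return true, nil
}

func main() {
	d := &db{nodes: map[string]*node{}, left: map[string]*node{}, failed: map[string]*node{}}
	d.nodes["n1"] = &node{Name: "n1"}

	moved, _ := d.changeNodeState("n1", nodeFailedState)
	fmt.Println("moved to failed:", moved) // true
	moved, _ = d.changeNodeState("n1", nodeFailedState)
	fmt.Println("moved again:", moved) // false: already failed
}
```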
|
||||
|
||||
vendor/github.com/docker/libnetwork/networkdb/event_delegate.go (generated, vendored, 61 lines changed)
@@ -21,24 +21,6 @@ func (e *eventDelegate) broadcastNodeEvent(addr net.IP, op opType) {
}
}

func (e *eventDelegate) purgeReincarnation(mn *memberlist.Node) {
for name, node := range e.nDB.failedNodes {
if node.Addr.Equal(mn.Addr) {
logrus.Infof("Node %s/%s, is the new incarnation of the failed node %s/%s", mn.Name, mn.Addr, name, node.Addr)
delete(e.nDB.failedNodes, name)
return
}
}

for name, node := range e.nDB.leftNodes {
if node.Addr.Equal(mn.Addr) {
logrus.Infof("Node %s/%s, is the new incarnation of the shutdown node %s/%s", mn.Name, mn.Addr, name, node.Addr)
delete(e.nDB.leftNodes, name)
return
}
}
}

func (e *eventDelegate) NotifyJoin(mn *memberlist.Node) {
logrus.Infof("Node %s/%s, joined gossip cluster", mn.Name, mn.Addr)
e.broadcastNodeEvent(mn.Addr, opCreate)
@@ -57,44 +39,35 @@ func (e *eventDelegate) NotifyJoin(mn *memberlist.Node) {
// Every node has a unique ID
// Check on the base of the IP address if the new node that joined is actually a new incarnation of a previous
// failed or shutdown one
e.purgeReincarnation(mn)
e.nDB.purgeReincarnation(mn)

e.nDB.nodes[mn.Name] = &node{Node: *mn}
logrus.Infof("Node %s/%s, added to nodes list", mn.Name, mn.Addr)
}

func (e *eventDelegate) NotifyLeave(mn *memberlist.Node) {
var failed bool
logrus.Infof("Node %s/%s, left gossip cluster", mn.Name, mn.Addr)
e.broadcastNodeEvent(mn.Addr, opDelete)
// The node left or failed, delete all the entries created by it.
// If the node was temporary down, deleting the entries will guarantee that the CREATE events will be accepted
// If the node instead left because was going down, then it makes sense to just delete all its state

e.nDB.Lock()
defer e.nDB.Unlock()
e.nDB.deleteNodeFromNetworks(mn.Name)
e.nDB.deleteNodeTableEntries(mn.Name)
if n, ok := e.nDB.nodes[mn.Name]; ok {
delete(e.nDB.nodes, mn.Name)

// Check if a new incarnation of the same node already joined
// In that case this node can simply be removed and no further action are needed
for name, node := range e.nDB.nodes {
if node.Addr.Equal(mn.Addr) {
logrus.Infof("Node %s/%s, is the new incarnation of the failed node %s/%s", name, node.Addr, mn.Name, mn.Addr)
return
}
}

// In case of node failure, keep retrying to reconnect every retryInterval (1sec) for nodeReapInterval (24h)
// Explicit leave will have already removed the node from the list of nodes (nDB.nodes) and put it into the leftNodes map
n.reapTime = nodeReapInterval
e.nDB.failedNodes[mn.Name] = n
failed = true
n, currState, _ := e.nDB.findNode(mn.Name)
if n == nil {
logrus.Errorf("Node %s/%s not found in the node lists", mn.Name, mn.Addr)
return
}

if failed {
logrus.Infof("Node %s/%s, added to failed nodes list", mn.Name, mn.Addr)
// if the node was active means that did not send the leave cluster message, so it's probable that
// failed. Else would be already in the left list so nothing else has to be done
if currState == nodeActiveState {
moved, err := e.nDB.changeNodeState(mn.Name, nodeFailedState)
if err != nil {
logrus.WithError(err).Errorf("impossible condition, node %s/%s not present in the list", mn.Name, mn.Addr)
return
}
if moved {
logrus.Infof("Node %s/%s, added to failed nodes list", mn.Name, mn.Addr)
}
}
}
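For context, eventDelegate above implements the EventDelegate callbacks of hashicorp/memberlist. A minimal sketch of how such a delegate could be wired into a memberlist instance, using only the public memberlist API and a stand-in delegate that just logs (nothing here is NetworkDB code), might be:

package main

import (
	"log"

	"github.com/hashicorp/memberlist"
)

// logDelegate is a stand-in for networkdb's eventDelegate: it only logs
// membership changes instead of updating node tables.
type logDelegate struct{}

func (d *logDelegate) NotifyJoin(n *memberlist.Node)   { log.Printf("join: %s/%s", n.Name, n.Addr) }
func (d *logDelegate) NotifyLeave(n *memberlist.Node)  { log.Printf("leave: %s/%s", n.Name, n.Addr) }
func (d *logDelegate) NotifyUpdate(n *memberlist.Node) { log.Printf("update: %s/%s", n.Name, n.Addr) }

func main() {
	cfg := memberlist.DefaultLANConfig()
	cfg.Events = &logDelegate{} // membership callbacks, analogous to eventDelegate above

	ml, err := memberlist.Create(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer ml.Shutdown()

	log.Printf("local member: %s", ml.LocalNode().Name)
}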
vendor/github.com/docker/libnetwork/networkdb/networkdbdiagnose.go (generated, vendored; 1 changed line)
@@ -399,6 +399,7 @@ func dbGetTable(ctx interface{}, w http.ResponseWriter, r *http.Request) {
Value: encodedValue,
Owner: v.owner,
})
i++
}
log.WithField("response", fmt.Sprintf("%+v", rsp)).Info("get table done")
diagnose.HTTPReply(w, diagnose.CommandSucceed(rsp), json)
vendor/github.com/docker/libnetwork/networkdb/nodemgmt.go (generated, vendored; new file, 120 lines)
@@ -0,0 +1,120 @@
package networkdb

import (
"fmt"

"github.com/hashicorp/memberlist"
"github.com/sirupsen/logrus"
)

type nodeState int

const (
nodeNotFound nodeState = -1
nodeActiveState nodeState = 0
nodeLeftState nodeState = 1
nodeFailedState nodeState = 2
)

var nodeStateName = map[nodeState]string{
-1: "NodeNotFound",
0: "NodeActive",
1: "NodeLeft",
2: "NodeFailed",
}

// findNode search the node into the 3 node lists and returns the node pointer and the list
// where it got found
func (nDB *NetworkDB) findNode(nodeName string) (*node, nodeState, map[string]*node) {
for i, nodes := range []map[string]*node{
nDB.nodes,
nDB.leftNodes,
nDB.failedNodes,
} {
if n, ok := nodes[nodeName]; ok {
return n, nodeState(i), nodes
}
}
return nil, nodeNotFound, nil
}

// changeNodeState changes the state of the node specified, returns true if the node was moved,
// false if there was no need to change the node state. Error will be returned if the node does not
// exists
func (nDB *NetworkDB) changeNodeState(nodeName string, newState nodeState) (bool, error) {
n, currState, m := nDB.findNode(nodeName)
if n == nil {
return false, fmt.Errorf("node %s not found", nodeName)
}

switch newState {
case nodeActiveState:
if currState == nodeActiveState {
return false, nil
}

delete(m, nodeName)
// reset the node reap time
n.reapTime = 0
nDB.nodes[nodeName] = n
case nodeLeftState:
if currState == nodeLeftState {
return false, nil
}

delete(m, nodeName)
nDB.leftNodes[nodeName] = n
case nodeFailedState:
if currState == nodeFailedState {
return false, nil
}

delete(m, nodeName)
nDB.failedNodes[nodeName] = n
}

logrus.Infof("Node %s change state %s --> %s", nodeName, nodeStateName[currState], nodeStateName[newState])

if newState == nodeLeftState || newState == nodeFailedState {
// set the node reap time, if not already set
// It is possible that a node passes from failed to left and the reaptime was already set so keep that value
if n.reapTime == 0 {
n.reapTime = nodeReapInterval
}
// The node leave or fails, delete all the entries created by it.
// If the node was temporary down, deleting the entries will guarantee that the CREATE events will be accepted
// If the node instead left because was going down, then it makes sense to just delete all its state
nDB.deleteNodeFromNetworks(n.Name)
nDB.deleteNodeTableEntries(n.Name)
}

return true, nil
}

func (nDB *NetworkDB) purgeReincarnation(mn *memberlist.Node) bool {
for name, node := range nDB.nodes {
if node.Addr.Equal(mn.Addr) && node.Port == mn.Port && mn.Name != name {
logrus.Infof("Node %s/%s, is the new incarnation of the active node %s/%s", mn.Name, mn.Addr, name, node.Addr)
nDB.changeNodeState(name, nodeLeftState)
return true
}
}

for name, node := range nDB.failedNodes {
if node.Addr.Equal(mn.Addr) && node.Port == mn.Port && mn.Name != name {
logrus.Infof("Node %s/%s, is the new incarnation of the failed node %s/%s", mn.Name, mn.Addr, name, node.Addr)
nDB.changeNodeState(name, nodeLeftState)
return true
}
}

for name, node := range nDB.leftNodes {
if node.Addr.Equal(mn.Addr) && node.Port == mn.Port && mn.Name != name {
logrus.Infof("Node %s/%s, is the new incarnation of the shutdown node %s/%s", mn.Name, mn.Addr, name, node.Addr)
nDB.changeNodeState(name, nodeLeftState)
return true
}
}

return false
}
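The new nodemgmt.go treats node bookkeeping as a small state machine over three maps (active, left, failed), moving a node between maps instead of tracking flags. A stripped-down, hypothetical sketch of that move-between-maps pattern, independent of NetworkDB, could look like:

package main

import (
	"errors"
	"fmt"
)

type state int

const (
	active state = iota
	left
	failed
)

type registry struct {
	// one map per state; a node lives in exactly one of them at a time
	byState map[state]map[string]struct{}
}

func newRegistry() *registry {
	return &registry{byState: map[state]map[string]struct{}{
		active: {}, left: {}, failed: {},
	}}
}

// find returns the current state of a node, or an error if it is unknown.
func (r *registry) find(name string) (state, error) {
	for s, m := range r.byState {
		if _, ok := m[name]; ok {
			return s, nil
		}
	}
	return 0, errors.New("node not found")
}

// move transfers a node to newState; it reports false when the node is
// already in that state, mirroring changeNodeState above.
func (r *registry) move(name string, newState state) (bool, error) {
	cur, err := r.find(name)
	if err != nil {
		return false, err
	}
	if cur == newState {
		return false, nil
	}
	delete(r.byState[cur], name)
	r.byState[newState][name] = struct{}{}
	return true, nil
}

func main() {
	r := newRegistry()
	r.byState[active]["node-1"] = struct{}{}
	moved, _ := r.move("node-1", failed)
	fmt.Println(moved) // true: node-1 went active -> failed
	moved, _ = r.move("node-1", failed)
	fmt.Println(moved) // false: already failed, nothing to do
}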
vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/networkallocator.go (generated, vendored; 5 changed lines)
@@ -404,6 +404,11 @@ func (na *cnmNetworkAllocator) IsServiceAllocated(s *api.Service, flags ...func(
vipLoop:
for _, vip := range s.Endpoint.VirtualIPs {
if na.IsVIPOnIngressNetwork(vip) && networkallocator.IsIngressNetworkNeeded(s) {
// This checks the condition when ingress network is needed
// but allocation has not been done.
if _, ok := na.services[s.ID]; !ok {
return false
}
continue vipLoop
}
for _, net := range specNetworks {
vendor/github.com/docker/swarmkit/manager/orchestrator/taskreaper/task_reaper.go (generated, vendored; 10 changed lines)
@@ -96,10 +96,10 @@ func (tr *TaskReaper) Run(ctx context.Context) {
// Serviceless tasks can be cleaned up right away since they are not attached to a service.
tr.cleanup = append(tr.cleanup, t.ID)
}
// tasks with desired state REMOVE that have progressed beyond SHUTDOWN can be cleaned up
// tasks with desired state REMOVE that have progressed beyond COMPLETE can be cleaned up
// right away
for _, t := range removeTasks {
if t.Status.State >= api.TaskStateShutdown {
if t.Status.State >= api.TaskStateCompleted {
tr.cleanup = append(tr.cleanup, t.ID)
}
}
@@ -138,10 +138,10 @@ func (tr *TaskReaper) Run(ctx context.Context) {
if t.Status.State >= api.TaskStateOrphaned && t.ServiceID == "" {
tr.cleanup = append(tr.cleanup, t.ID)
}
// add tasks that have progressed beyond SHUTDOWN and have desired state REMOVE. These
// add tasks that have progressed beyond COMPLETE and have desired state REMOVE. These
// tasks are associated with slots that were removed as part of a service scale down
// or service removal.
if t.DesiredState == api.TaskStateRemove && t.Status.State >= api.TaskStateShutdown {
if t.DesiredState == api.TaskStateRemove && t.Status.State >= api.TaskStateCompleted {
tr.cleanup = append(tr.cleanup, t.ID)
}
case api.EventUpdateCluster:
@@ -282,6 +282,8 @@ func (tr *TaskReaper) tick() {

// Stop stops the TaskReaper and waits for the main loop to exit.
func (tr *TaskReaper) Stop() {
// TODO(dperny) calling stop on the task reaper twice will cause a panic
// because we try to close a channel that will already have been closed.
close(tr.stopChan)
<-tr.doneChan
}
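The task reaper change above lowers the cleanup threshold from SHUTDOWN to COMPLETE, relying on the numeric ordering of task states to mean "has progressed at least this far". A toy illustration of that comparison (invented state values and ordering, not swarmkit's actual api.TaskState constants) might be:

package main

import "fmt"

// taskState is a toy stand-in for a task lifecycle enum: the numeric
// order encodes progression, so ">=" means "has progressed at least this far".
type taskState int

const (
	stateRunning taskState = iota
	stateCompleted
	stateShutdown
	stateFailed
)

// cleanable reports whether a task has progressed far enough to be reaped.
func cleanable(s taskState) bool {
	// Comparing against stateCompleted (instead of stateShutdown) also
	// sweeps up tasks that finished successfully, which is the effect of
	// the diff above.
	return s >= stateCompleted
}

func main() {
	fmt.Println(cleanable(stateRunning))   // false: still in flight
	fmt.Println(cleanable(stateCompleted)) // true: newly eligible for cleanup
	fmt.Println(cleanable(stateShutdown))  // true: was already eligible before the change
}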