Compare commits

...

11 Commits

Author SHA1 Message Date
Paweł Gronowski
45873be4ae Merge pull request #50105 from jsternberg/revert-build-dangling
Revert "containerd: images overridden by a build are kept dangling"
2025-05-30 08:53:59 +00:00
Jonathan A. Sternberg
7994426e61 Revert "containerd: images overridden by a build are kept dangling"
This reverts commit 50a856157c.

Signed-off-by: Jonathan A. Sternberg <jonathan.sternberg@docker.com>
2025-05-29 13:23:45 -05:00
Cory Snider
f144264bae Merge pull request #50090 from corhere/libn/overlay-netip
libnetwork/d/overlay: use netip types more
2025-05-29 14:12:28 -04:00
Rob Murray
768cfaeb62 Merge pull request #50050 from robmry/nftables_internal_dns
nftables: rules for the internal DNS resolver
2025-05-29 19:11:27 +01:00
Rob Murray
d3289dda4b Add nftables NAT rules for internal DNS resolver
Signed-off-by: Rob Murray <rob.murray@docker.com>
2025-05-29 17:20:25 +01:00
Sebastiaan van Stijn
7a0bf747f5 Merge pull request #50038 from ctalledo/fix-for-50037
Update worker.Platforms() in builder-next worker.
2025-05-29 16:09:38 +02:00
Rob Murray
b43afbf898 Merge pull request #50098 from robmry/remove_docker-user_return_rule
iptables: Drop explicit RETURN rule from DOCKER-USER
2025-05-29 11:27:54 +01:00
Cesar Talledo
c299ba3b38 Update worker.Platforms() in builder-next worker.
Use platform MatchComparer when checking for matching platforms.

Also, add a unit test to ensure that merging of defined and host-supported
platforms works correctly.

Signed-off-by: Cesar Talledo <cesar.talledo@docker.com>
2025-05-28 14:47:59 -07:00
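
A minimal sketch (not part of the diff) of why the check moves from Format-based string comparison to a platforms.MatchComparer: the comparer applies containerd's normalization rules, so a host-supported platform that differs only in a normalized variant is recognized as already covered instead of being appended as a duplicate. The github.com/containerd/containerd/platforms import path and the exact variant behaviour shown here are assumptions about the vendored platforms package, not something stated in the commit.

package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	defined := ocispec.Platform{OS: "linux", Architecture: "arm64"}
	candidate := ocispec.Platform{OS: "linux", Architecture: "arm64", Variant: "v8"}

	// Plain string keys treat these as two different platforms.
	fmt.Println(platforms.Format(defined) == platforms.Format(candidate)) // false

	// A MatchComparer normalizes both sides before comparing, so the
	// host-supported candidate is seen as already defined (assumed behaviour).
	fmt.Println(platforms.Only(defined).Match(candidate)) // expected: true
}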
Rob Murray
dc519a0f18 iptables: Drop explicit RETURN rule from DOCKER-USER
Signed-off-by: Rob Murray <rob.murray@docker.com>
2025-05-28 11:11:56 +01:00
Cory Snider
d188df0039 libn/d/overlay: use netip types more
The netip types are really useful for tracking state in the overlay
driver as they are hashable, unlike net.IP and friends, making them
directly usable as map keys. Converting between netip and net types is
fairly trivial, but fewer conversions is more ergonomic.

The NetworkDB entries for the overlay peer table encode the IP addresses
as strings. We need to parse them to some representation before
processing them further. Parse directly into netip types and pass those
values around to cut down on the number of conversions needed.

The peerDB needs to marshal the keys and entries to structs of hashable
values to be able to insert them into the SetMatrix. Use netip.Addr in
peerEntry so that peerEntry values can be directly inserted into the
SetMatrix without conversions. Use a hashable struct type as the
SetMatrix key to avoid having to marshal the whole struct to a string
and parse it back out.

Use netip.Addr as the map key for the driver's encryption map so the
values do not need to be converted to and from strings. Change the
encryption configuration methods to take netip types so the peerDB code
can pass netip values directly.

Signed-off-by: Cory Snider <csnider@mirantis.com>
2025-05-27 13:47:11 -04:00
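
As a minimal illustration (not from the diff) of the hashability point above: net.IP is a byte slice and cannot be used as a map key, which is why the old code kept stringifying addresses, while netip.Addr is a comparable value type that works directly with maps and ==.

package main

import (
	"fmt"
	"net"
	"net/netip"
)

func main() {
	// net.IP is a []byte, so this would not compile:
	//   seen := map[net.IP]bool{}
	// The pre-patch workaround was to key maps by ip.String().
	legacy := map[string]struct{}{}
	legacy[net.ParseIP("10.0.0.1").String()] = struct{}{}

	// netip.Addr is comparable, so it can be a map key and compared with ==,
	// which is what the encryption map and peerDB keys rely on after this change.
	nodes := map[netip.Addr]struct{}{}
	addr := netip.MustParseAddr("10.0.0.1")
	nodes[addr] = struct{}{}
	fmt.Println(len(legacy), len(nodes), addr == netip.MustParseAddr("10.0.0.1"))
}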
Cory Snider
0317f773a6 libnetwork/internal/setmatrix: make keys generic
Make the SetMatrix key's type generic so that e.g. netip.Addr values can
be used as matrix keys.

Signed-off-by: Cory Snider <csnider@mirantis.com>
2025-05-27 13:29:41 -04:00
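
A stripped-down sketch of the generic key/value shape this commit introduces (an assumption-labelled toy, not the real libnetwork type, which wraps mapset.Set and a mutex): with the key constrained to comparable, a hashable struct or netip.Addr can be used directly instead of a string-encoded key.

package main

import (
	"fmt"
	"net/netip"
)

// setMatrix mimics the generic shape of libnetwork's SetMatrix[K, V comparable]:
// a map from keys to sets of values, here backed by a plain map-of-maps.
type setMatrix[K, V comparable] struct {
	matrix map[K]map[V]struct{}
}

// Insert adds value to the set for key and reports whether it was newly
// inserted along with the resulting set cardinality.
func (s *setMatrix[K, V]) Insert(key K, value V) (inserted bool, cardinality int) {
	if s.matrix == nil {
		s.matrix = make(map[K]map[V]struct{})
	}
	set, ok := s.matrix[key]
	if !ok {
		s.matrix[key] = map[V]struct{}{value: {}}
		return true, 1
	}
	_, exists := set[value]
	set[value] = struct{}{}
	return !exists, len(set)
}

func main() {
	var s setMatrix[netip.Addr, string]
	fmt.Println(s.Insert(netip.MustParseAddr("10.0.0.1"), "eid-1")) // true 1
	fmt.Println(s.Insert(netip.MustParseAddr("10.0.0.1"), "eid-1")) // false 1
}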
38 changed files with 431 additions and 339 deletions

View File

@@ -21,7 +21,6 @@ import (
"github.com/docker/docker/builder/builder-next/adapters/containerimage"
"github.com/docker/docker/builder/builder-next/adapters/localinlinecache"
"github.com/docker/docker/builder/builder-next/adapters/snapshot"
"github.com/docker/docker/builder/builder-next/exporter"
"github.com/docker/docker/builder/builder-next/exporter/mobyexporter"
"github.com/docker/docker/builder/builder-next/imagerefchecker"
mobyworker "github.com/docker/docker/builder/builder-next/worker"
@@ -169,10 +168,7 @@ func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt
}
wo.Executor = exec
w, err := mobyworker.NewContainerdWorker(ctx, wo, exporter.Opt{
Callbacks: opt.Callbacks,
ImageTagger: opt.ImageTagger,
}, rt)
w, err := mobyworker.NewContainerdWorker(ctx, wo, opt.Callbacks, rt)
if err != nil {
return nil, err
}

View File

@@ -2,49 +2,39 @@ package exporter
import (
"context"
"fmt"
"strings"
distref "github.com/distribution/reference"
"github.com/containerd/log"
"github.com/distribution/reference"
"github.com/docker/docker/builder/builder-next/exporter/overrides"
"github.com/docker/docker/image"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/util/progress"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// Opt are options for the exporter wrapper.
type Opt struct {
// Callbacks contains callbacks used by the image exporter.
Callbacks BuildkitCallbacks
// ImageTagger is used to tag the image after it is exported.
ImageTagger ImageTagger
}
type BuildkitCallbacks struct {
// Exported is called when an image is exported by buildkit.
Exported func(ctx context.Context, id string, desc ocispec.Descriptor)
}
type ImageTagger interface {
TagImage(ctx context.Context, imageID image.ID, newTag distref.Named) error
// Named is a callback that is called when an image is created in the
// containerd image store by buildkit.
Named func(ctx context.Context, ref reference.NamedTagged, desc ocispec.Descriptor)
}
// Wraps the containerimage exporter's Resolve method to apply moby-specific
// overrides to the exporter attributes.
type imageExporterMobyWrapper struct {
exp exporter.Exporter
opt Opt
exp exporter.Exporter
callbacks BuildkitCallbacks
}
// NewWrapper returns an exporter wrapper that applies moby specific attributes
// and hooks the export process.
func NewWrapper(exp exporter.Exporter, opt Opt) (exporter.Exporter, error) {
func NewWrapper(exp exporter.Exporter, callbacks BuildkitCallbacks) (exporter.Exporter, error) {
return &imageExporterMobyWrapper{
exp: exp,
opt: opt,
exp: exp,
callbacks: callbacks,
}, nil
}
@@ -57,9 +47,7 @@ func (e *imageExporterMobyWrapper) Resolve(ctx context.Context, id int, exporter
if err != nil {
return nil, err
}
// Force the exporter to not use a name so it always creates a dangling image.
exporterAttrs[string(exptypes.OptKeyName)] = ""
exporterAttrs[string(exptypes.OptKeyName)] = strings.Join(reposAndTags, ",")
exporterAttrs[string(exptypes.OptKeyUnpack)] = "true"
if _, has := exporterAttrs[string(exptypes.OptKeyDanglingPrefix)]; !has {
exporterAttrs[string(exptypes.OptKeyDanglingPrefix)] = "moby-dangling"
@@ -73,15 +61,13 @@ func (e *imageExporterMobyWrapper) Resolve(ctx context.Context, id int, exporter
return &imageExporterInstanceWrapper{
ExporterInstance: inst,
reposAndTags: reposAndTags,
opt: e.opt,
callbacks: e.callbacks,
}, nil
}
type imageExporterInstanceWrapper struct {
exporter.ExporterInstance
reposAndTags []string
opt Opt
callbacks BuildkitCallbacks
}
func (i *imageExporterInstanceWrapper) Export(ctx context.Context, src *exporter.Source, inlineCache exptypes.InlineCache, sessionID string) (map[string]string, exporter.DescriptorReference, error) {
@@ -92,31 +78,38 @@ func (i *imageExporterInstanceWrapper) Export(ctx context.Context, src *exporter
desc := ref.Descriptor()
imageID := out[exptypes.ExporterImageDigestKey]
if i.opt.Callbacks.Exported != nil {
i.opt.Callbacks.Exported(ctx, imageID, desc)
if i.callbacks.Exported != nil {
i.callbacks.Exported(ctx, imageID, desc)
}
err = i.processNamed(ctx, image.ID(imageID), out, desc)
return out, ref, err
if i.callbacks.Named != nil {
i.processNamedCallback(ctx, out, desc)
}
return out, ref, nil
}
func (i *imageExporterInstanceWrapper) processNamed(ctx context.Context, imageID image.ID, out map[string]string, desc ocispec.Descriptor) error {
if len(i.reposAndTags) == 0 {
return nil
func (i *imageExporterInstanceWrapper) processNamedCallback(ctx context.Context, out map[string]string, desc ocispec.Descriptor) {
// TODO(vvoland): Change to exptypes.ExporterImageNameKey when BuildKit v0.21 is vendored.
imageName := out["image.name"]
if imageName == "" {
log.G(ctx).Warn("image named with empty image.name produced by buildkit")
return
}
for _, repoAndTag := range i.reposAndTags {
newTag, err := distref.ParseNamed(repoAndTag)
for _, name := range strings.Split(imageName, ",") {
ref, err := reference.ParseNormalizedNamed(name)
if err != nil {
return err
// Shouldn't happen, but log if it does and continue.
log.G(ctx).WithFields(log.Fields{
"name": name,
"error": err,
}).Warn("image named with invalid reference produced by buildkit")
continue
}
done := progress.OneOff(ctx, fmt.Sprintf("naming to %s", newTag))
if err := i.opt.ImageTagger.TagImage(ctx, imageID, newTag); err != nil {
return done(err)
if namedTagged, ok := reference.TagNameOnly(ref).(reference.NamedTagged); ok {
i.callbacks.Named(ctx, namedTagged, desc)
}
done(nil)
}
out[exptypes.ExporterImageNameKey] = strings.Join(i.reposAndTags, ",")
return nil
}

View File

@@ -16,11 +16,11 @@ import (
// ContainerdWorker is a local worker instance with dedicated snapshotter, cache, and so on.
type ContainerdWorker struct {
*base.Worker
opt exporter.Opt
callbacks exporter.BuildkitCallbacks
}
// NewContainerdWorker instantiates a local worker.
func NewContainerdWorker(ctx context.Context, wo base.WorkerOpt, opt exporter.Opt, rt nethttp.RoundTripper) (*ContainerdWorker, error) {
func NewContainerdWorker(ctx context.Context, wo base.WorkerOpt, callbacks exporter.BuildkitCallbacks, rt nethttp.RoundTripper) (*ContainerdWorker, error) {
bw, err := base.NewWorker(ctx, wo)
if err != nil {
return nil, err
@@ -35,7 +35,7 @@ func NewContainerdWorker(ctx context.Context, wo base.WorkerOpt, opt exporter.Op
log.G(ctx).Warnf("Could not register builder http source: %s", err)
}
return &ContainerdWorker{Worker: bw, opt: opt}, nil
return &ContainerdWorker{Worker: bw, callbacks: callbacks}, nil
}
// Exporter returns exporter by name
@@ -46,7 +46,7 @@ func (w *ContainerdWorker) Exporter(name string, sm *session.Manager) (bkexporte
if err != nil {
return nil, err
}
return exporter.NewWrapper(exp, w.opt)
return exporter.NewWrapper(exp, w.callbacks)
default:
return w.Worker.Exporter(name, sm)
}

View File

@@ -158,15 +158,7 @@ func (w *Worker) Labels() map[string]string {
// Platforms returns one or more platforms supported by the image.
func (w *Worker) Platforms(noCache bool) []ocispec.Platform {
if noCache {
pm := make(map[string]struct{}, len(w.Opt.Platforms))
for _, p := range w.Opt.Platforms {
pm[platforms.Format(p)] = struct{}{}
}
for _, p := range archutil.SupportedPlatforms(noCache) {
if _, ok := pm[platforms.Format(p)]; !ok {
w.Opt.Platforms = append(w.Opt.Platforms, p)
}
}
w.Opt.Platforms = mergePlatforms(w.Opt.Platforms, archutil.SupportedPlatforms(noCache))
}
if len(w.Opt.Platforms) == 0 {
return []ocispec.Platform{platforms.DefaultSpec()}
@@ -174,6 +166,30 @@ func (w *Worker) Platforms(noCache bool) []ocispec.Platform {
return w.Opt.Platforms
}
// mergePlatforms merges the defined platforms with the supported platforms
// and returns a new slice of platforms. It ensures no duplicates.
func mergePlatforms(defined, supported []ocispec.Platform) []ocispec.Platform {
result := []ocispec.Platform{}
matchers := make([]platforms.MatchComparer, len(defined))
for i, p := range defined {
result = append(result, p)
matchers[i] = platforms.Only(p)
}
for _, p := range supported {
exists := false
for _, m := range matchers {
if m.Match(p) {
exists = true
break
}
}
if !exists {
result = append(result, p)
}
}
return result
}
// GCPolicy returns automatic GC Policy
func (w *Worker) GCPolicy() []client.PruneInfo {
return w.Opt.GCPolicy

View File

@@ -0,0 +1,76 @@
package worker
import (
"testing"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)
func TestMergePlatforms(t *testing.T) {
defaultPlatform := ocispec.Platform{OS: "linux", Architecture: "amd64"}
otherPlatform := ocispec.Platform{OS: "windows", Architecture: "amd64"}
thirdPlatform := ocispec.Platform{OS: "darwin", Architecture: "arm64"}
tests := []struct {
name string
defined []ocispec.Platform
supported []ocispec.Platform
contains []ocispec.Platform
wantLen int
}{
{
name: "AllUnique",
defined: []ocispec.Platform{defaultPlatform},
supported: []ocispec.Platform{otherPlatform, thirdPlatform},
contains: []ocispec.Platform{defaultPlatform, otherPlatform, thirdPlatform},
wantLen: 3,
},
{
name: "SomeOverlap",
defined: []ocispec.Platform{defaultPlatform, otherPlatform},
supported: []ocispec.Platform{otherPlatform, thirdPlatform},
contains: []ocispec.Platform{defaultPlatform, otherPlatform, thirdPlatform},
wantLen: 3,
},
{
name: "AllOverlap",
defined: []ocispec.Platform{defaultPlatform, otherPlatform},
supported: []ocispec.Platform{defaultPlatform, otherPlatform},
contains: []ocispec.Platform{defaultPlatform, otherPlatform},
wantLen: 2,
},
{
name: "EmptySupported",
defined: []ocispec.Platform{defaultPlatform},
supported: []ocispec.Platform{},
contains: []ocispec.Platform{defaultPlatform},
wantLen: 1,
},
{
name: "EmptyDefined",
defined: []ocispec.Platform{},
supported: []ocispec.Platform{defaultPlatform, otherPlatform},
contains: []ocispec.Platform{defaultPlatform, otherPlatform},
wantLen: 2,
},
{
name: "BothEmpty",
defined: []ocispec.Platform{},
supported: []ocispec.Platform{},
contains: []ocispec.Platform{},
wantLen: 0,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
got := mergePlatforms(tc.defined, tc.supported)
assert.Equal(t, len(got), tc.wantLen)
for _, p := range tc.contains {
assert.Assert(t, is.Contains(got, p))
}
})
}
}

View File

@@ -431,6 +431,7 @@ func initBuildkit(ctx context.Context, d *daemon.Daemon) (_ builderOptions, clos
ContainerdNamespace: cfg.ContainerdNamespace,
Callbacks: exporter.BuildkitCallbacks{
Exported: d.ImageExportedByBuildkit,
Named: d.ImageNamedByBuildkit,
},
CDISpecDirs: cdiSpecDirs,
})

View File

@@ -3,6 +3,7 @@ package daemon
import (
"context"
"github.com/distribution/reference"
"github.com/docker/docker/api/types/events"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
@@ -14,3 +15,10 @@ import (
func (daemon *Daemon) ImageExportedByBuildkit(ctx context.Context, id string, desc ocispec.Descriptor) {
daemon.imageService.LogImageEvent(ctx, id, id, events.ActionCreate)
}
// ImageNamedByBuildkit is a callback that is called when an image is tagged by buildkit.
// Note: It is only called if BuildKit didn't call the image service itself to perform the tagging.
// Currently this only happens when the containerd image store is used.
func (daemon *Daemon) ImageNamedByBuildkit(ctx context.Context, ref reference.NamedTagged, desc ocispec.Descriptor) {
daemon.imageService.LogImageEvent(ctx, desc.Digest.String(), reference.FamiliarString(ref), events.ActionTag)
}

View File

@@ -45,7 +45,6 @@ Table `filter`:
Chain DOCKER-USER (1 references)
num pkts bytes target prot opt in out source destination
1 0 0 RETURN 0 -- * * 0.0.0.0/0 0.0.0.0/0
<details>
@@ -72,7 +71,6 @@ Table `filter`:
-A DOCKER-FORWARD -i docker0 -j ACCEPT
-A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
-A DOCKER-USER -j RETURN
</details>

View File

@@ -60,7 +60,6 @@ The filter table is:
Chain DOCKER-USER (1 references)
num pkts bytes target prot opt in out source destination
1 0 0 RETURN 0 -- * * 0.0.0.0/0 0.0.0.0/0
<details>
@@ -99,7 +98,6 @@ The filter table is:
-A DOCKER-ISOLATION-STAGE-1 -i docker_gwbridge ! -o docker_gwbridge -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-2 -o docker_gwbridge -j DROP
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
-A DOCKER-USER -j RETURN
</details>

View File

@@ -66,7 +66,6 @@ The filter table is updated as follows:
Chain DOCKER-USER (1 references)
num pkts bytes target prot opt in out source destination
1 0 0 RETURN 0 -- * * 0.0.0.0/0 0.0.0.0/0
<details>
@@ -99,7 +98,6 @@ The filter table is updated as follows:
-A DOCKER-ISOLATION-STAGE-1 ! -d 192.0.2.0/24 -i bridgeICC -j DROP
-A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
-A DOCKER-USER -j RETURN
</details>

View File

@@ -59,7 +59,6 @@ The filter and nat tables are identical to [nat mode][0]:
Chain DOCKER-USER (1 references)
num pkts bytes target prot opt in out source destination
1 0 0 RETURN 0 -- * * 0.0.0.0/0 0.0.0.0/0
-P INPUT ACCEPT
@@ -90,7 +89,6 @@ The filter and nat tables are identical to [nat mode][0]:
-A DOCKER-ISOLATION-STAGE-1 -i bridge1 ! -o bridge1 -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-2 -o bridge1 -j DROP
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
-A DOCKER-USER -j RETURN
</details>

View File

@@ -56,7 +56,6 @@ The filter table is:
Chain DOCKER-USER (1 references)
num pkts bytes target prot opt in out source destination
1 0 0 RETURN 0 -- * * 0.0.0.0/0 0.0.0.0/0
<details>
@@ -89,7 +88,6 @@ The filter table is:
-A DOCKER-ISOLATION-STAGE-1 -i bridge1 ! -o bridge1 -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-2 -o bridge1 -j DROP
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
-A DOCKER-USER -j RETURN
</details>

View File

@@ -58,7 +58,6 @@ The filter table is:
Chain DOCKER-USER (1 references)
num pkts bytes target prot opt in out source destination
1 0 0 RETURN 0 -- * * 0.0.0.0/0 0.0.0.0/0
<details>
@@ -93,7 +92,6 @@ The filter table is:
-A DOCKER-ISOLATION-STAGE-1 -i bridge1 ! -o bridge1 -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-2 -o bridge1 -j DROP
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
-A DOCKER-USER -j RETURN
</details>

View File

@@ -60,7 +60,6 @@ The filter table is the same as with the userland proxy enabled.
Chain DOCKER-USER (1 references)
num pkts bytes target prot opt in out source destination
1 0 0 RETURN 0 -- * * 0.0.0.0/0 0.0.0.0/0
-P INPUT ACCEPT
@@ -91,7 +90,6 @@ The filter table is the same as with the userland proxy enabled.
-A DOCKER-ISOLATION-STAGE-1 -i bridge1 ! -o bridge1 -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-2 -o bridge1 -j DROP
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
-A DOCKER-USER -j RETURN
</details>

View File

@@ -60,7 +60,6 @@ The filter table is:
Chain DOCKER-USER (1 references)
num pkts bytes target prot opt in out source destination
1 0 0 RETURN 0 -- * * 0.0.0.0/0 0.0.0.0/0
<details>
@@ -97,7 +96,6 @@ The filter table is:
-A DOCKER-ISOLATION-STAGE-1 -i bridge1 ! -o bridge1 -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-2 -o bridge1 -j DROP
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
-A DOCKER-USER -j RETURN
</details>

View File

@@ -56,7 +56,6 @@ The filter table is updated as follows:
Chain DOCKER-USER (1 references)
num pkts bytes target prot opt in out source destination
1 0 0 RETURN 0 -- * * 0.0.0.0/0 0.0.0.0/0
<details>
@@ -90,7 +89,6 @@ The filter table is updated as follows:
-A DOCKER-ISOLATION-STAGE-1 -i bridge1 ! -o bridge1 -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-2 -o bridge1 -j DROP
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
-A DOCKER-USER -j RETURN
</details>

View File

@@ -10,6 +10,7 @@ import (
"fmt"
"hash/fnv"
"net"
"net/netip"
"strconv"
"sync"
"syscall"
@@ -90,7 +91,7 @@ func (s *spi) String() string {
}
type encrMap struct {
nodes map[string][]*spi
nodes map[netip.Addr][]*spi
sync.Mutex
}
@@ -100,7 +101,7 @@ func (e *encrMap) String() string {
b := new(bytes.Buffer)
for k, v := range e.nodes {
b.WriteString("\n")
b.WriteString(k)
b.WriteString(k.String())
b.WriteString(":")
b.WriteString("[")
for _, s := range v {
@@ -112,7 +113,7 @@ func (e *encrMap) String() string {
return b.String()
}
func (d *driver) checkEncryption(nid string, rIP net.IP, isLocal, add bool) error {
func (d *driver) checkEncryption(nid string, rIP netip.Addr, isLocal, add bool) error {
log.G(context.TODO()).Debugf("checkEncryption(%.7s, %v, %t)", nid, rIP, isLocal)
n := d.network(nid)
@@ -126,13 +127,13 @@ func (d *driver) checkEncryption(nid string, rIP net.IP, isLocal, add bool) erro
lIP := d.bindAddress
aIP := d.advertiseAddress
nodes := map[string]net.IP{}
nodes := map[netip.Addr]struct{}{}
switch {
case isLocal:
if err := d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {
if !aIP.Equal(pEntry.vtep) {
nodes[pEntry.vtep.String()] = pEntry.vtep
if err := d.peerDbNetworkWalk(nid, func(_ netip.Addr, _ net.HardwareAddr, pEntry *peerEntry) bool {
if aIP != pEntry.vtep {
nodes[pEntry.vtep] = struct{}{}
}
return false
}); err != nil {
@@ -140,14 +141,14 @@ func (d *driver) checkEncryption(nid string, rIP net.IP, isLocal, add bool) erro
}
default:
if len(d.network(nid).endpoints) > 0 {
nodes[rIP.String()] = rIP
nodes[rIP] = struct{}{}
}
}
log.G(context.TODO()).Debugf("List of nodes: %s", nodes)
if add {
for _, rIP := range nodes {
for rIP := range nodes {
if err := setupEncryption(lIP, aIP, rIP, d.secMap, d.keys); err != nil {
log.G(context.TODO()).Warnf("Failed to program network encryption between %s and %s: %v", lIP, rIP, err)
}
@@ -165,19 +166,18 @@ func (d *driver) checkEncryption(nid string, rIP net.IP, isLocal, add bool) erro
// setupEncryption programs the encryption parameters for secure communication
// between the local node and a remote node.
func setupEncryption(localIP, advIP, remoteIP net.IP, em *encrMap, keys []*key) error {
func setupEncryption(localIP, advIP, remoteIP netip.Addr, em *encrMap, keys []*key) error {
log.G(context.TODO()).Debugf("Programming encryption between %s and %s", localIP, remoteIP)
rIPs := remoteIP.String()
indices := make([]*spi, 0, len(keys))
for i, k := range keys {
spis := &spi{buildSPI(advIP, remoteIP, k.tag), buildSPI(remoteIP, advIP, k.tag)}
spis := &spi{buildSPI(advIP.AsSlice(), remoteIP.AsSlice(), k.tag), buildSPI(remoteIP.AsSlice(), advIP.AsSlice(), k.tag)}
dir := reverse
if i == 0 {
dir = bidir
}
fSA, rSA, err := programSA(localIP, remoteIP, spis, k, dir, true)
fSA, rSA, err := programSA(localIP.AsSlice(), remoteIP.AsSlice(), spis, k, dir, true)
if err != nil {
log.G(context.TODO()).Warn(err)
}
@@ -192,15 +192,15 @@ func setupEncryption(localIP, advIP, remoteIP net.IP, em *encrMap, keys []*key)
}
em.Lock()
em.nodes[rIPs] = indices
em.nodes[remoteIP] = indices
em.Unlock()
return nil
}
func removeEncryption(localIP, remoteIP net.IP, em *encrMap) error {
func removeEncryption(localIP, remoteIP netip.Addr, em *encrMap) error {
em.Lock()
indices, ok := em.nodes[remoteIP.String()]
indices, ok := em.nodes[remoteIP]
em.Unlock()
if !ok {
return nil
@@ -210,7 +210,7 @@ func removeEncryption(localIP, remoteIP net.IP, em *encrMap) error {
if i == 0 {
dir = bidir
}
fSA, rSA, err := programSA(localIP, remoteIP, idxs, nil, dir, false)
fSA, rSA, err := programSA(localIP.AsSlice(), remoteIP.AsSlice(), idxs, nil, dir, false)
if err != nil {
log.G(context.TODO()).Warn(err)
}
@@ -477,7 +477,7 @@ func buildAeadAlgo(k *key, s int) *netlink.XfrmStateAlgo {
}
}
func (d *driver) secMapWalk(f func(string, []*spi) ([]*spi, bool)) error {
func (d *driver) secMapWalk(f func(netip.Addr, []*spi) ([]*spi, bool)) error {
d.secMap.Lock()
for node, indices := range d.secMap.nodes {
idxs, stop := f(node, indices)
@@ -498,7 +498,7 @@ func (d *driver) setKeys(keys []*key) error {
// Accept the encryption keys and clear any stale encryption map
d.Lock()
d.keys = keys
d.secMap = &encrMap{nodes: map[string][]*spi{}}
d.secMap = &encrMap{nodes: map[netip.Addr][]*spi{}}
d.Unlock()
log.G(context.TODO()).Debugf("Initial encryption keys: %v", keys)
return nil
@@ -547,9 +547,8 @@ func (d *driver) updateKeys(newKey, primary, pruneKey *key) error {
return types.InvalidParameterErrorf("attempting to both make a key (index %d) primary and delete it", priIdx)
}
d.secMapWalk(func(rIPs string, spis []*spi) ([]*spi, bool) {
rIP := net.ParseIP(rIPs)
return updateNodeKey(lIP, aIP, rIP, spis, d.keys, newIdx, priIdx, delIdx), false
d.secMapWalk(func(rIP netip.Addr, spis []*spi) ([]*spi, bool) {
return updateNodeKey(lIP.AsSlice(), aIP.AsSlice(), rIP.AsSlice(), spis, d.keys, newIdx, priIdx, delIdx), false
})
// swap primary

View File

@@ -6,10 +6,12 @@ import (
"context"
"fmt"
"net"
"net/netip"
"syscall"
"github.com/containerd/log"
"github.com/docker/docker/libnetwork/driverapi"
"github.com/docker/docker/libnetwork/internal/netiputil"
"github.com/docker/docker/libnetwork/netlabel"
"github.com/docker/docker/libnetwork/ns"
"github.com/docker/docker/libnetwork/osl"
@@ -105,7 +107,7 @@ func (d *driver) Join(ctx context.Context, nid, eid string, sboxKey string, jinf
if sub == s {
continue
}
if err = jinfo.AddStaticRoute(sub.subnetIP, types.NEXTHOP, s.gwIP.IP); err != nil {
if err = jinfo.AddStaticRoute(netiputil.ToIPNet(sub.subnetIP), types.NEXTHOP, s.gwIP.Addr().AsSlice()); err != nil {
log.G(ctx).Errorf("Adding subnet %s static route in network %q failed\n", s.subnetIP, n.id)
}
}
@@ -117,9 +119,9 @@ func (d *driver) Join(ctx context.Context, nid, eid string, sboxKey string, jinf
}
}
d.peerAdd(nid, eid, ep.addr.IP, ep.addr.Mask, ep.mac, d.advertiseAddress, true)
d.peerAdd(nid, eid, ep.addr, ep.mac, d.advertiseAddress, true)
if err = d.checkEncryption(nid, nil, true, true); err != nil {
if err = d.checkEncryption(nid, netip.Addr{}, true, true); err != nil {
log.G(ctx).Warn(err)
}
@@ -172,34 +174,34 @@ func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key stri
// Ignore local peers. We already know about them and they
// should not be added to vxlan fdb.
if net.ParseIP(peer.TunnelEndpointIP).Equal(d.advertiseAddress) {
if addr, _ := netip.ParseAddr(peer.TunnelEndpointIP); addr == d.advertiseAddress {
return
}
addr, err := types.ParseCIDR(peer.EndpointIP)
addr, err := netip.ParsePrefix(peer.EndpointIP)
if err != nil {
log.G(context.TODO()).Errorf("Invalid peer IP %s received in event notify", peer.EndpointIP)
log.G(context.TODO()).WithError(err).Errorf("Invalid peer IP %s received in event notify", peer.EndpointIP)
return
}
mac, err := net.ParseMAC(peer.EndpointMAC)
if err != nil {
log.G(context.TODO()).Errorf("Invalid mac %s received in event notify", peer.EndpointMAC)
log.G(context.TODO()).WithError(err).Errorf("Invalid mac %s received in event notify", peer.EndpointMAC)
return
}
vtep := net.ParseIP(peer.TunnelEndpointIP)
if vtep == nil {
log.G(context.TODO()).Errorf("Invalid VTEP %s received in event notify", peer.TunnelEndpointIP)
vtep, err := netip.ParseAddr(peer.TunnelEndpointIP)
if err != nil {
log.G(context.TODO()).WithError(err).Errorf("Invalid VTEP %s received in event notify", peer.TunnelEndpointIP)
return
}
if etype == driverapi.Delete {
d.peerDelete(nid, eid, addr.IP, addr.Mask, mac, vtep, false)
d.peerDelete(nid, eid, addr, mac, vtep, false)
return
}
d.peerAdd(nid, eid, addr.IP, addr.Mask, mac, vtep, false)
d.peerAdd(nid, eid, addr, mac, vtep, false)
}
// Leave method is invoked when a Sandbox detaches from an endpoint.
@@ -219,7 +221,7 @@ func (d *driver) Leave(nid, eid string) error {
return types.InternalMaskableErrorf("could not find endpoint with id %s", eid)
}
d.peerDelete(nid, eid, ep.addr.IP, ep.addr.Mask, ep.mac, d.advertiseAddress, true)
d.peerDelete(nid, eid, ep.addr, ep.mac, d.advertiseAddress, true)
n.leaveSandbox()

View File

@@ -6,9 +6,11 @@ import (
"context"
"fmt"
"net"
"net/netip"
"github.com/containerd/log"
"github.com/docker/docker/libnetwork/driverapi"
"github.com/docker/docker/libnetwork/internal/netiputil"
"github.com/docker/docker/libnetwork/netutils"
"github.com/docker/docker/libnetwork/ns"
)
@@ -20,7 +22,7 @@ type endpoint struct {
nid string
ifName string
mac net.HardwareAddr
addr *net.IPNet
addr netip.Prefix
}
func (n *network) endpoint(eid string) *endpoint {
@@ -61,12 +63,13 @@ func (d *driver) CreateEndpoint(_ context.Context, nid, eid string, ifInfo drive
}
ep := &endpoint{
id: eid,
nid: n.id,
addr: ifInfo.Address(),
mac: ifInfo.MacAddress(),
id: eid,
nid: n.id,
mac: ifInfo.MacAddress(),
}
if ep.addr == nil {
var ok bool
ep.addr, ok = netiputil.ToPrefix(ifInfo.Address())
if !ok {
return fmt.Errorf("create endpoint was not passed interface IP address")
}
@@ -75,7 +78,7 @@ func (d *driver) CreateEndpoint(_ context.Context, nid, eid string, ifInfo drive
}
if ep.mac == nil {
ep.mac = netutils.GenerateMACFromIP(ep.addr.IP)
ep.mac = netutils.GenerateMACFromIP(ep.addr.Addr().AsSlice())
if err := ifInfo.SetMacAddress(ep.mac); err != nil {
return err
}

View File

@@ -6,7 +6,7 @@ import (
"context"
"errors"
"fmt"
"net"
"net/netip"
"os"
"path/filepath"
"runtime"
@@ -18,6 +18,7 @@ import (
"github.com/docker/docker/internal/nlwrap"
"github.com/docker/docker/libnetwork/driverapi"
"github.com/docker/docker/libnetwork/drivers/overlay/overlayutils"
"github.com/docker/docker/libnetwork/internal/netiputil"
"github.com/docker/docker/libnetwork/netlabel"
"github.com/docker/docker/libnetwork/ns"
"github.com/docker/docker/libnetwork/osl"
@@ -42,8 +43,8 @@ type subnet struct {
brName string
vni uint32
initErr error
subnetIP *net.IPNet
gwIP *net.IPNet
subnetIP netip.Prefix
gwIP netip.Prefix
}
type network struct {
@@ -138,11 +139,9 @@ func (d *driver) CreateNetwork(ctx context.Context, id string, option map[string
}
for i, ipd := range ipV4Data {
s := &subnet{
subnetIP: ipd.Pool,
gwIP: ipd.Gateway,
vni: vnis[i],
}
s := &subnet{vni: vnis[i]}
s.subnetIP, _ = netiputil.ToPrefix(ipd.Pool)
s.gwIP, _ = netiputil.ToPrefix(ipd.Gateway)
n.subnets = append(n.subnets, s)
}
@@ -427,7 +426,7 @@ func (n *network) setupSubnetSandbox(s *subnet, brName, vxlanName string) error
// create a bridge and vxlan device for this subnet and move it to the sandbox
sbox := n.sbox
if err := sbox.AddInterface(context.TODO(), brName, "br", "", osl.WithIPv4Address(s.gwIP), osl.WithIsBridge(true)); err != nil {
if err := sbox.AddInterface(context.TODO(), brName, "br", "", osl.WithIPv4Address(netiputil.ToIPNet(s.gwIP)), osl.WithIsBridge(true)); err != nil {
return fmt.Errorf("bridge creation in sandbox failed for subnet %q: %v", s.subnetIP.String(), err)
}
@@ -614,15 +613,13 @@ func (n *network) sandbox() *osl.Namespace {
}
// getSubnetforIP returns the subnet to which the given IP belongs
func (n *network) getSubnetforIP(ip *net.IPNet) *subnet {
func (n *network) getSubnetforIP(ip netip.Prefix) *subnet {
for _, s := range n.subnets {
// first check if the mask lengths are the same
i, _ := s.subnetIP.Mask.Size()
j, _ := ip.Mask.Size()
if i != j {
if s.subnetIP.Bits() != ip.Bits() {
continue
}
if s.subnetIP.Contains(ip.IP) {
if s.subnetIP.Contains(ip.Addr()) {
return s
}
}

View File

@@ -7,7 +7,7 @@ package overlay
import (
"context"
"fmt"
"net"
"net/netip"
"sync"
"github.com/containerd/log"
@@ -28,7 +28,7 @@ const (
var _ discoverapi.Discover = (*driver)(nil)
type driver struct {
bindAddress, advertiseAddress net.IP
bindAddress, advertiseAddress netip.Addr
config map[string]interface{}
peerDb peerNetworkMap
@@ -48,7 +48,7 @@ func Register(r driverapi.Registerer, config map[string]interface{}) error {
peerDb: peerNetworkMap{
mp: map[string]*peerMap{},
},
secMap: &encrMap{nodes: map[string][]*spi{}},
secMap: &encrMap{nodes: map[netip.Addr][]*spi{}},
config: config,
}
return r.RegisterDriver(NetworkType, d, driverapi.Capability{
@@ -78,16 +78,17 @@ func (d *driver) isIPv6Transport() (bool, error) {
// from the address family of our own advertise address. This is a
// reasonable inference to make as Linux VXLAN links do not support
// mixed-address-family remote peers.
if d.advertiseAddress == nil {
if !d.advertiseAddress.IsValid() {
return false, fmt.Errorf("overlay: cannot determine address family of transport: the local data-plane address is not currently known")
}
return d.advertiseAddress.To4() == nil, nil
return d.advertiseAddress.Is6(), nil
}
func (d *driver) nodeJoin(data discoverapi.NodeDiscoveryData) error {
if data.Self {
advAddr, bindAddr := net.ParseIP(data.Address), net.ParseIP(data.BindAddress)
if advAddr == nil {
advAddr, _ := netip.ParseAddr(data.Address)
bindAddr, _ := netip.ParseAddr(data.BindAddress)
if !advAddr.IsValid() {
return fmt.Errorf("invalid discovery data")
}
d.Lock()

View File

@@ -7,6 +7,7 @@ import (
"context"
"fmt"
"net"
"net/netip"
"sync"
"syscall"
@@ -17,52 +18,16 @@ import (
const ovPeerTable = "overlay_peer_table"
type peerKey struct {
peerIP net.IP
peerMac net.HardwareAddr
}
type peerEntry struct {
eid string
vtep net.IP
peerIPMask net.IPMask
vtep netip.Addr
prefixBits int // number of 1-bits in network mask of peerIP
isLocal bool
}
func (p *peerEntry) MarshalDB() peerEntryDB {
ones, bits := p.peerIPMask.Size()
return peerEntryDB{
eid: p.eid,
vtep: p.vtep.String(),
peerIPMaskOnes: ones,
peerIPMaskBits: bits,
isLocal: p.isLocal,
}
}
// This the structure saved into the set (SetMatrix), due to the implementation of it
// the value inserted in the set has to be Hashable so the []byte had to be converted into
// strings
type peerEntryDB struct {
eid string
vtep string
peerIPMaskOnes int
peerIPMaskBits int
isLocal bool
}
func (p *peerEntryDB) UnMarshalDB() peerEntry {
return peerEntry{
eid: p.eid,
vtep: net.ParseIP(p.vtep),
peerIPMask: net.CIDRMask(p.peerIPMaskOnes, p.peerIPMaskBits),
isLocal: p.isLocal,
}
}
type peerMap struct {
// set of peerEntry, note the values have to be objects and not pointers to maintain the proper equality checks
mp setmatrix.SetMatrix[peerEntryDB]
mp setmatrix.SetMatrix[ipmac, peerEntry]
sync.Mutex
}
@@ -72,28 +37,7 @@ type peerNetworkMap struct {
sync.Mutex
}
func (pKey peerKey) String() string {
return fmt.Sprintf("%s %s", pKey.peerIP, pKey.peerMac)
}
func (pKey *peerKey) Scan(state fmt.ScanState, verb rune) error {
ipB, err := state.Token(true, nil)
if err != nil {
return err
}
pKey.peerIP = net.ParseIP(string(ipB))
macB, err := state.Token(true, nil)
if err != nil {
return err
}
pKey.peerMac, err = net.ParseMAC(string(macB))
return err
}
func (d *driver) peerDbWalk(f func(string, *peerKey, *peerEntry) bool) error {
func (d *driver) peerDbWalk(f func(string, netip.Addr, net.HardwareAddr, *peerEntry) bool) error {
d.peerDb.Lock()
nids := []string{}
for nid := range d.peerDb.mp {
@@ -102,14 +46,14 @@ func (d *driver) peerDbWalk(f func(string, *peerKey, *peerEntry) bool) error {
d.peerDb.Unlock()
for _, nid := range nids {
d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {
return f(nid, pKey, pEntry)
d.peerDbNetworkWalk(nid, func(peerIP netip.Addr, peerMac net.HardwareAddr, pEntry *peerEntry) bool {
return f(nid, peerIP, peerMac, pEntry)
})
}
return nil
}
func (d *driver) peerDbNetworkWalk(nid string, f func(*peerKey, *peerEntry) bool) error {
func (d *driver) peerDbNetworkWalk(nid string, f func(netip.Addr, net.HardwareAddr, *peerEntry) bool) error {
d.peerDb.Lock()
pMap, ok := d.peerDb.mp[nid]
d.peerDb.Unlock()
@@ -118,22 +62,18 @@ func (d *driver) peerDbNetworkWalk(nid string, f func(*peerKey, *peerEntry) bool
return nil
}
mp := map[string]peerEntry{}
mp := map[ipmac]peerEntry{}
pMap.Lock()
for _, pKeyStr := range pMap.mp.Keys() {
entryDBList, ok := pMap.mp.Get(pKeyStr)
for _, pKey := range pMap.mp.Keys() {
entryDBList, ok := pMap.mp.Get(pKey)
if ok {
mp[pKeyStr] = entryDBList[0].UnMarshalDB()
mp[pKey] = entryDBList[0]
}
}
pMap.Unlock()
for pKeyStr, pEntry := range mp {
var pKey peerKey
if _, err := fmt.Sscan(pKeyStr, &pKey); err != nil {
log.G(context.TODO()).Warnf("Peer key scan on network %s failed: %v", nid, err)
}
if f(&pKey, &pEntry) {
for pKey, pEntry := range mp {
if f(pKey.ip, pKey.mac.HardwareAddr(), &pEntry) {
return nil
}
}
@@ -141,12 +81,14 @@ func (d *driver) peerDbNetworkWalk(nid string, f func(*peerKey, *peerEntry) bool
return nil
}
func (d *driver) peerDbSearch(nid string, peerIP net.IP) (*peerKey, *peerEntry, error) {
var pKeyMatched *peerKey
func (d *driver) peerDbSearch(nid string, peerIP netip.Addr) (netip.Addr, net.HardwareAddr, *peerEntry, error) {
var peerIPMatched netip.Addr
var peerMacMatched net.HardwareAddr
var pEntryMatched *peerEntry
err := d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {
if pKey.peerIP.Equal(peerIP) {
pKeyMatched = pKey
err := d.peerDbNetworkWalk(nid, func(ip netip.Addr, mac net.HardwareAddr, pEntry *peerEntry) bool {
if ip == peerIP {
peerIPMatched = ip
peerMacMatched = mac
pEntryMatched = pEntry
return true
}
@@ -154,17 +96,17 @@ func (d *driver) peerDbSearch(nid string, peerIP net.IP) (*peerKey, *peerEntry,
return false
})
if err != nil {
return nil, nil, fmt.Errorf("peerdb search for peer ip %q failed: %v", peerIP, err)
return netip.Addr{}, nil, nil, fmt.Errorf("peerdb search for peer ip %q failed: %v", peerIP, err)
}
if pKeyMatched == nil || pEntryMatched == nil {
return nil, nil, fmt.Errorf("peer ip %q not found in peerdb", peerIP)
if !peerIPMatched.IsValid() || pEntryMatched == nil {
return netip.Addr{}, nil, nil, fmt.Errorf("peer ip %q not found in peerdb", peerIP)
}
return pKeyMatched, pEntryMatched, nil
return peerIPMatched, peerMacMatched, pEntryMatched, nil
}
func (d *driver) peerDbAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask, peerMac net.HardwareAddr, vtep net.IP, isLocal bool) (bool, int) {
func (d *driver) peerDbAdd(nid, eid string, peerIP netip.Prefix, peerMac net.HardwareAddr, vtep netip.Addr, isLocal bool) (bool, int) {
d.peerDb.Lock()
pMap, ok := d.peerDb.mp[nid]
if !ok {
@@ -173,30 +115,27 @@ func (d *driver) peerDbAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask
}
d.peerDb.Unlock()
pKey := peerKey{
peerIP: peerIP,
peerMac: peerMac,
}
pKey := ipmacOf(peerIP.Addr(), peerMac)
pEntry := peerEntry{
eid: eid,
vtep: vtep,
peerIPMask: peerIPMask,
prefixBits: peerIP.Bits(),
isLocal: isLocal,
}
pMap.Lock()
defer pMap.Unlock()
b, i := pMap.mp.Insert(pKey.String(), pEntry.MarshalDB())
b, i := pMap.mp.Insert(pKey, pEntry)
if i != 1 {
// Transient case, there is more than one endpoint that is using the same IP,MAC pair
s, _ := pMap.mp.String(pKey.String())
s, _ := pMap.mp.String(pKey)
log.G(context.TODO()).Warnf("peerDbAdd transient condition - Key:%s cardinality:%d db state:%s", pKey.String(), i, s)
}
return b, i
}
func (d *driver) peerDbDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask, peerMac net.HardwareAddr, vtep net.IP, isLocal bool) (bool, int) {
func (d *driver) peerDbDelete(nid, eid string, peerIP netip.Prefix, peerMac net.HardwareAddr, vtep netip.Addr, isLocal bool) (bool, int) {
d.peerDb.Lock()
pMap, ok := d.peerDb.mp[nid]
if !ok {
@@ -205,25 +144,22 @@ func (d *driver) peerDbDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPM
}
d.peerDb.Unlock()
pKey := peerKey{
peerIP: peerIP,
peerMac: peerMac,
}
pKey := ipmacOf(peerIP.Addr(), peerMac)
pEntry := peerEntry{
eid: eid,
vtep: vtep,
peerIPMask: peerIPMask,
prefixBits: peerIP.Bits(),
isLocal: isLocal,
}
pMap.Lock()
defer pMap.Unlock()
b, i := pMap.mp.Remove(pKey.String(), pEntry.MarshalDB())
b, i := pMap.mp.Remove(pKey, pEntry)
if i != 0 {
// Transient case, there is more than one endpoint that is using the same IP,MAC pair
s, _ := pMap.mp.String(pKey.String())
log.G(context.TODO()).Warnf("peerDbDelete transient condition - Key:%s cardinality:%d db state:%s", pKey.String(), i, s)
s, _ := pMap.mp.String(pKey)
log.G(context.TODO()).Warnf("peerDbDelete transient condition - Key:%s cardinality:%d db state:%s", pKey, i, s)
}
return b, i
}
@@ -248,28 +184,28 @@ func (d *driver) initSandboxPeerDB(nid string) {
}
func (d *driver) peerInitOp(nid string) error {
return d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {
return d.peerDbNetworkWalk(nid, func(peerIP netip.Addr, peerMac net.HardwareAddr, pEntry *peerEntry) bool {
// Local entries do not need to be added
if pEntry.isLocal {
return false
}
d.peerAddOp(nid, pEntry.eid, pKey.peerIP, pEntry.peerIPMask, pKey.peerMac, pEntry.vtep, false, pEntry.isLocal)
d.peerAddOp(nid, pEntry.eid, netip.PrefixFrom(peerIP, pEntry.prefixBits), peerMac, pEntry.vtep, false, pEntry.isLocal)
// return false to loop on all entries
return false
})
}
func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask, peerMac net.HardwareAddr, vtep net.IP, localPeer bool) {
func (d *driver) peerAdd(nid, eid string, peerIP netip.Prefix, peerMac net.HardwareAddr, vtep netip.Addr, localPeer bool) {
d.peerOpMu.Lock()
defer d.peerOpMu.Unlock()
err := d.peerAddOp(nid, eid, peerIP, peerIPMask, peerMac, vtep, true, localPeer)
err := d.peerAddOp(nid, eid, peerIP, peerMac, vtep, true, localPeer)
if err != nil {
log.G(context.TODO()).WithError(err).Warn("Peer add operation failed")
}
}
func (d *driver) peerAddOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask, peerMac net.HardwareAddr, vtep net.IP, updateDB, localPeer bool) error {
func (d *driver) peerAddOp(nid, eid string, peerIP netip.Prefix, peerMac net.HardwareAddr, vtep netip.Addr, updateDB, localPeer bool) error {
if err := validateID(nid, eid); err != nil {
return err
}
@@ -277,7 +213,7 @@ func (d *driver) peerAddOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask
var dbEntries int
var inserted bool
if updateDB {
inserted, dbEntries = d.peerDbAdd(nid, eid, peerIP, peerIPMask, peerMac, vtep, localPeer)
inserted, dbEntries = d.peerDbAdd(nid, eid, peerIP, peerMac, vtep, localPeer)
if !inserted {
log.G(context.TODO()).Warnf("Entry already present in db: nid:%s eid:%s peerIP:%v peerMac:%v isLocal:%t vtep:%v",
nid, eid, peerIP, peerMac, localPeer, vtep)
@@ -302,14 +238,9 @@ func (d *driver) peerAddOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask
return nil
}
IP := &net.IPNet{
IP: peerIP,
Mask: peerIPMask,
}
s := n.getSubnetforIP(IP)
s := n.getSubnetforIP(peerIP)
if s == nil {
return fmt.Errorf("couldn't find the subnet %q in network %q", IP.String(), n.id)
return fmt.Errorf("couldn't find the subnet %q in network %q", peerIP.String(), n.id)
}
if err := n.joinSandbox(s, false); err != nil {
@@ -321,7 +252,7 @@ func (d *driver) peerAddOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask
}
// Add neighbor entry for the peer IP
if err := sbox.AddNeighbor(peerIP, peerMac, osl.WithLinkName(s.vxlanName)); err != nil {
if err := sbox.AddNeighbor(peerIP.Addr().AsSlice(), peerMac, osl.WithLinkName(s.vxlanName)); err != nil {
if _, ok := err.(osl.NeighborSearchError); ok && dbEntries > 1 {
// We are in the transient case so only the first configuration is programmed into the kernel
// Upon deletion if the active configuration is deleted the next one from the database will be restored
@@ -332,28 +263,28 @@ func (d *driver) peerAddOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask
}
// Add fdb entry to the bridge for the peer mac
if err := sbox.AddNeighbor(vtep, peerMac, osl.WithLinkName(s.vxlanName), osl.WithFamily(syscall.AF_BRIDGE)); err != nil {
if err := sbox.AddNeighbor(vtep.AsSlice(), peerMac, osl.WithLinkName(s.vxlanName), osl.WithFamily(syscall.AF_BRIDGE)); err != nil {
return fmt.Errorf("could not add fdb entry for nid:%s eid:%s into the sandbox:%v", nid, eid, err)
}
return nil
}
func (d *driver) peerDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask, peerMac net.HardwareAddr, vtep net.IP, localPeer bool) {
func (d *driver) peerDelete(nid, eid string, peerIP netip.Prefix, peerMac net.HardwareAddr, vtep netip.Addr, localPeer bool) {
d.peerOpMu.Lock()
defer d.peerOpMu.Unlock()
err := d.peerDeleteOp(nid, eid, peerIP, peerIPMask, peerMac, vtep, localPeer)
err := d.peerDeleteOp(nid, eid, peerIP, peerMac, vtep, localPeer)
if err != nil {
log.G(context.TODO()).WithError(err).Warn("Peer delete operation failed")
}
}
func (d *driver) peerDeleteOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask, peerMac net.HardwareAddr, vtep net.IP, localPeer bool) error {
func (d *driver) peerDeleteOp(nid, eid string, peerIP netip.Prefix, peerMac net.HardwareAddr, vtep netip.Addr, localPeer bool) error {
if err := validateID(nid, eid); err != nil {
return err
}
deleted, dbEntries := d.peerDbDelete(nid, eid, peerIP, peerIPMask, peerMac, vtep, localPeer)
deleted, dbEntries := d.peerDbDelete(nid, eid, peerIP, peerMac, vtep, localPeer)
if !deleted {
log.G(context.TODO()).Warnf("Entry was not in db: nid:%s eid:%s peerIP:%v peerMac:%v isLocal:%t vtep:%v",
nid, eid, peerIP, peerMac, localPeer, vtep)
@@ -375,16 +306,12 @@ func (d *driver) peerDeleteOp(nid, eid string, peerIP net.IP, peerIPMask net.IPM
// Local peers do not have any local configuration to delete
if !localPeer {
IP := &net.IPNet{
IP: peerIP,
Mask: peerIPMask,
}
s := n.getSubnetforIP(IP)
s := n.getSubnetforIP(peerIP)
if s == nil {
return fmt.Errorf("could not find the subnet %q in network %q", IP.String(), n.id)
return fmt.Errorf("could not find the subnet %q in network %q", peerIP.String(), n.id)
}
// Remove fdb entry to the bridge for the peer mac
if err := sbox.DeleteNeighbor(vtep, peerMac, osl.WithLinkName(s.vxlanName)); err != nil {
if err := sbox.DeleteNeighbor(vtep.AsSlice(), peerMac, osl.WithLinkName(s.vxlanName)); err != nil {
if _, ok := err.(osl.NeighborSearchError); ok && dbEntries > 0 {
// We fall in here if there is a transient state and if the neighbor that is being deleted
// was never been configured into the kernel (we allow only 1 configuration at the time per <ip,mac> mapping)
@@ -394,7 +321,7 @@ func (d *driver) peerDeleteOp(nid, eid string, peerIP net.IP, peerIPMask net.IPM
}
// Delete neighbor entry for the peer IP
if err := sbox.DeleteNeighbor(peerIP, peerMac, osl.WithLinkName(s.vxlanName), osl.WithFamily(syscall.AF_BRIDGE)); err != nil {
if err := sbox.DeleteNeighbor(peerIP.Addr().AsSlice(), peerMac, osl.WithLinkName(s.vxlanName), osl.WithFamily(syscall.AF_BRIDGE)); err != nil {
return fmt.Errorf("could not delete neighbor entry for nid:%s eid:%s into the sandbox:%v", nid, eid, err)
}
}
@@ -406,12 +333,12 @@ func (d *driver) peerDeleteOp(nid, eid string, peerIP net.IP, peerIPMask net.IPM
// If there is still an entry into the database and the deletion went through without errors means that there is now no
// configuration active in the kernel.
// Restore one configuration for the <ip,mac> directly from the database, note that is guaranteed that there is one
peerKey, peerEntry, err := d.peerDbSearch(nid, peerIP)
peerIPAddr, peerMac, peerEntry, err := d.peerDbSearch(nid, peerIP.Addr())
if err != nil {
log.G(context.TODO()).Errorf("peerDeleteOp unable to restore a configuration for nid:%s ip:%v mac:%v err:%s", nid, peerIP, peerMac, err)
return err
}
return d.peerAddOp(nid, peerEntry.eid, peerIP, peerEntry.peerIPMask, peerKey.peerMac, peerEntry.vtep, false, peerEntry.isLocal)
return d.peerAddOp(nid, peerEntry.eid, netip.PrefixFrom(peerIPAddr, peerEntry.prefixBits), peerMac, peerEntry.vtep, false, peerEntry.isLocal)
}
func (d *driver) peerFlush(nid string) {
@@ -434,7 +361,7 @@ func (d *driver) peerFlushOp(nid string) error {
}
func (d *driver) peerDBUpdateSelf() {
d.peerDbWalk(func(nid string, pkey *peerKey, pEntry *peerEntry) bool {
d.peerDbWalk(func(nid string, _ netip.Addr, _ net.HardwareAddr, pEntry *peerEntry) bool {
if pEntry.isLocal {
pEntry.vtep = d.advertiseAddress
}

View File

@@ -1,32 +0,0 @@
//go:build linux
package overlay
import (
"net"
"testing"
)
func TestPeerMarshal(t *testing.T) {
_, ipNet, _ := net.ParseCIDR("192.168.0.1/24")
p := &peerEntry{
eid: "eid",
isLocal: true,
peerIPMask: ipNet.Mask,
vtep: ipNet.IP,
}
entryDB := p.MarshalDB()
x := entryDB.UnMarshalDB()
if x.eid != p.eid {
t.Fatalf("Incorrect Unmarshalling for eid: %v != %v", x.eid, p.eid)
}
if x.isLocal != p.isLocal {
t.Fatalf("Incorrect Unmarshalling for isLocal: %v != %v", x.isLocal, p.isLocal)
}
if x.peerIPMask.String() != p.peerIPMask.String() {
t.Fatalf("Incorrect Unmarshalling for eid: %v != %v", x.peerIPMask, p.peerIPMask)
}
if x.vtep.String() != p.vtep.String() {
t.Fatalf("Incorrect Unmarshalling for eid: %v != %v", x.vtep, p.vtep)
}
}

View File

@@ -0,0 +1,52 @@
package overlay
// Handy utility types for making unhashable values hashable.
import (
"net"
"net/netip"
)
// macAddr is a hashable encoding of a MAC address.
type macAddr uint64
// macAddrOf converts a net.HardwareAddr to a macAddr.
func macAddrOf(mac net.HardwareAddr) macAddr {
if len(mac) != 6 {
return 0
}
return macAddr(mac[0])<<40 | macAddr(mac[1])<<32 | macAddr(mac[2])<<24 |
macAddr(mac[3])<<16 | macAddr(mac[4])<<8 | macAddr(mac[5])
}
// HardwareAddr converts a macAddr back to a net.HardwareAddr.
func (p macAddr) HardwareAddr() net.HardwareAddr {
mac := [6]byte{
byte(p >> 40), byte(p >> 32), byte(p >> 24),
byte(p >> 16), byte(p >> 8), byte(p),
}
return mac[:]
}
// String returns p.HardwareAddr().String().
func (p macAddr) String() string {
return p.HardwareAddr().String()
}
// ipmac is a hashable tuple of an IP address and a MAC address suitable for use as a map key.
type ipmac struct {
ip netip.Addr
mac macAddr
}
// ipmacOf is a convenience constructor for creating an ipmac from a [net.HardwareAddr].
func ipmacOf(ip netip.Addr, mac net.HardwareAddr) ipmac {
return ipmac{
ip: ip,
mac: macAddrOf(mac),
}
}
func (i ipmac) String() string {
return i.ip.String() + " " + i.mac.String()
}

View File

@@ -0,0 +1,29 @@
package overlay
import (
"net"
"net/netip"
"testing"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)
func TestMACAddrOf(t *testing.T) {
want := net.HardwareAddr{0x01, 0x02, 0x03, 0x04, 0x05, 0x06}
assert.DeepEqual(t, macAddrOf(want).HardwareAddr(), want)
}
func TestIPMACOf(t *testing.T) {
assert.Check(t, is.Equal(ipmacOf(netip.Addr{}, nil), ipmac{}))
assert.Check(t, is.Equal(
ipmacOf(
netip.MustParseAddr("11.22.33.44"),
net.HardwareAddr{0x01, 0x02, 0x03, 0x04, 0x05, 0x06},
),
ipmac{
ip: netip.MustParseAddr("11.22.33.44"),
mac: macAddrOf(net.HardwareAddr{0x01, 0x02, 0x03, 0x04, 0x05, 0x06}),
},
))
}

View File

@@ -55,9 +55,6 @@ func setupUserChain(ipVersion iptables.IPVersion) error {
if _, err := ipt.NewChain(userChain, iptables.Filter); err != nil {
return fmt.Errorf("failed to create %s %v chain: %v", userChain, ipVersion, err)
}
if err := ipt.AddReturnRule(userChain); err != nil {
return fmt.Errorf("failed to add the RETURN rule for %s %v: %w", userChain, ipVersion, err)
}
if err := ipt.EnsureJumpRule("FORWARD", userChain); err != nil {
return fmt.Errorf("failed to ensure the jump rule for %s %v: %w", userChain, ipVersion, err)
}

View File

@@ -13,14 +13,14 @@ import (
// The zero value is an empty set matrix ready to use.
//
// SetMatrix values are safe for concurrent use.
type SetMatrix[T comparable] struct {
matrix map[string]mapset.Set[T]
type SetMatrix[K, V comparable] struct {
matrix map[K]mapset.Set[V]
mu sync.Mutex
}
// Get returns the members of the set for a specific key as a slice.
func (s *SetMatrix[T]) Get(key string) ([]T, bool) {
func (s *SetMatrix[K, V]) Get(key K) ([]V, bool) {
s.mu.Lock()
defer s.mu.Unlock()
set, ok := s.matrix[key]
@@ -31,7 +31,7 @@ func (s *SetMatrix[T]) Get(key string) ([]T, bool) {
}
// Contains is used to verify if an element is in a set for a specific key.
func (s *SetMatrix[T]) Contains(key string, value T) (containsElement, setExists bool) {
func (s *SetMatrix[K, V]) Contains(key K, value V) (containsElement, setExists bool) {
s.mu.Lock()
defer s.mu.Unlock()
set, ok := s.matrix[key]
@@ -43,13 +43,13 @@ func (s *SetMatrix[T]) Contains(key string, value T) (containsElement, setExists
// Insert inserts the value in the set of a key and returns whether the value is
// inserted (was not already in the set) and the number of elements in the set.
func (s *SetMatrix[T]) Insert(key string, value T) (inserted bool, cardinality int) {
func (s *SetMatrix[K, V]) Insert(key K, value V) (inserted bool, cardinality int) {
s.mu.Lock()
defer s.mu.Unlock()
set, ok := s.matrix[key]
if !ok {
if s.matrix == nil {
s.matrix = make(map[string]mapset.Set[T])
s.matrix = make(map[K]mapset.Set[V])
}
s.matrix[key] = mapset.NewThreadUnsafeSet(value)
return true, 1
@@ -59,7 +59,7 @@ func (s *SetMatrix[T]) Insert(key string, value T) (inserted bool, cardinality i
}
// Remove removes the value in the set for a specific key.
func (s *SetMatrix[T]) Remove(key string, value T) (removed bool, cardinality int) {
func (s *SetMatrix[K, V]) Remove(key K, value V) (removed bool, cardinality int) {
s.mu.Lock()
defer s.mu.Unlock()
set, ok := s.matrix[key]
@@ -80,7 +80,7 @@ func (s *SetMatrix[T]) Remove(key string, value T) (removed bool, cardinality in
}
// Cardinality returns the number of elements in the set for a key.
func (s *SetMatrix[T]) Cardinality(key string) (cardinality int, ok bool) {
func (s *SetMatrix[K, V]) Cardinality(key K) (cardinality int, ok bool) {
s.mu.Lock()
defer s.mu.Unlock()
set, ok := s.matrix[key]
@@ -93,7 +93,7 @@ func (s *SetMatrix[T]) Cardinality(key string) (cardinality int, ok bool) {
// String returns the string version of the set.
// The empty string is returned if there is no set for key.
func (s *SetMatrix[T]) String(key string) (v string, ok bool) {
func (s *SetMatrix[K, V]) String(key K) (v string, ok bool) {
s.mu.Lock()
defer s.mu.Unlock()
set, ok := s.matrix[key]
@@ -104,10 +104,10 @@ func (s *SetMatrix[T]) String(key string) (v string, ok bool) {
}
// Keys returns all the keys in the map.
func (s *SetMatrix[T]) Keys() []string {
func (s *SetMatrix[K, V]) Keys() []K {
s.mu.Lock()
defer s.mu.Unlock()
keys := make([]string, 0, len(s.matrix))
keys := make([]K, 0, len(s.matrix))
for k := range s.matrix {
keys = append(keys, k)
}

View File

@@ -9,7 +9,7 @@ import (
)
func TestSetSerialInsertDelete(t *testing.T) {
var s SetMatrix[string]
var s SetMatrix[string, string]
b, i := s.Insert("a", "1")
if !b || i != 1 {
@@ -135,7 +135,7 @@ func TestSetSerialInsertDelete(t *testing.T) {
}
}
func insertDeleteRotuine(ctx context.Context, endCh chan int, s *SetMatrix[string], key, value string) {
func insertDeleteRotuine(ctx context.Context, endCh chan int, s *SetMatrix[string, string], key, value string) {
for {
select {
case <-ctx.Done():
@@ -158,7 +158,7 @@ func insertDeleteRotuine(ctx context.Context, endCh chan int, s *SetMatrix[strin
}
func TestSetParallelInsertDelete(t *testing.T) {
var s SetMatrix[string]
var s SetMatrix[string, string]
parallelRoutines := 6
endCh := make(chan int)
// Let the routines running and competing for 10s

View File

@@ -457,7 +457,7 @@ func getSvcRecords(t *testing.T, n *Network, key string) (addrs []netip.Addr, fo
sr, ok := n.ctrlr.svcRecords[n.id]
assert.Assert(t, ok)
lookup := func(svcMap *setmatrix.SetMatrix[svcMapEntry]) bool {
lookup := func(svcMap *setmatrix.SetMatrix[string, svcMapEntry]) bool {
mapEntryList, ok := svcMap.Get(key)
if !ok {
return false

View File

@@ -57,9 +57,9 @@ type svcMapEntry struct {
}
type svcInfo struct {
svcMap setmatrix.SetMatrix[svcMapEntry]
svcIPv6Map setmatrix.SetMatrix[svcMapEntry]
ipMap setmatrix.SetMatrix[ipInfo]
svcMap setmatrix.SetMatrix[string, svcMapEntry]
svcIPv6Map setmatrix.SetMatrix[string, svcMapEntry]
ipMap setmatrix.SetMatrix[string, ipInfo]
service map[string][]servicePorts
}
@@ -1370,7 +1370,7 @@ func (n *Network) updateSvcRecord(ctx context.Context, ep *Endpoint, isAdd bool)
}
}
func addIPToName(ipMap *setmatrix.SetMatrix[ipInfo], name, serviceID string, ip net.IP) {
func addIPToName(ipMap *setmatrix.SetMatrix[string, ipInfo], name, serviceID string, ip net.IP) {
reverseIP := netutils.ReverseIP(ip.String())
ipMap.Insert(reverseIP, ipInfo{
name: name,
@@ -1378,7 +1378,7 @@ func addIPToName(ipMap *setmatrix.SetMatrix[ipInfo], name, serviceID string, ip
})
}
func delIPToName(ipMap *setmatrix.SetMatrix[ipInfo], name, serviceID string, ip net.IP) {
func delIPToName(ipMap *setmatrix.SetMatrix[string, ipInfo], name, serviceID string, ip net.IP) {
reverseIP := netutils.ReverseIP(ip.String())
ipMap.Remove(reverseIP, ipInfo{
name: name,
@@ -1386,7 +1386,7 @@ func delIPToName(ipMap *setmatrix.SetMatrix[ipInfo], name, serviceID string, ip
})
}
func addNameToIP(svcMap *setmatrix.SetMatrix[svcMapEntry], name, serviceID string, epIP net.IP) {
func addNameToIP(svcMap *setmatrix.SetMatrix[string, svcMapEntry], name, serviceID string, epIP net.IP) {
// Since DNS name resolution is case-insensitive, Use the lower-case form
// of the name as the key into svcMap
lowerCaseName := strings.ToLower(name)
@@ -1396,7 +1396,7 @@ func addNameToIP(svcMap *setmatrix.SetMatrix[svcMapEntry], name, serviceID strin
})
}
func delNameToIP(svcMap *setmatrix.SetMatrix[svcMapEntry], name, serviceID string, epIP net.IP) {
func delNameToIP(svcMap *setmatrix.SetMatrix[string, svcMapEntry], name, serviceID string, epIP net.IP) {
lowerCaseName := strings.ToLower(name)
svcMap.Remove(lowerCaseName, svcMapEntry{
ip: epIP.String(),

View File

@@ -180,8 +180,8 @@ func (r *Resolver) Start() error {
return r.err
}
if err := r.setupIPTable(); err != nil {
return fmt.Errorf("setting up IP table rules failed: %v", err)
if err := r.setupNAT(context.TODO()); err != nil {
return fmt.Errorf("setting up DNAT/SNAT rules failed: %v", err)
}
s := &dns.Server{Handler: dns.HandlerFunc(r.serveDNS), PacketConn: r.conn}

View File

@@ -3,9 +3,11 @@
package libnetwork
import (
"context"
"fmt"
"net"
"github.com/docker/docker/libnetwork/internal/nftables"
"github.com/docker/docker/libnetwork/iptables"
)
@@ -16,7 +18,7 @@ const (
postroutingChain = "DOCKER_POSTROUTING"
)
func (r *Resolver) setupIPTable() error {
func (r *Resolver) setupNAT(ctx context.Context) error {
if r.err != nil {
return r.err
}
@@ -24,6 +26,14 @@ func (r *Resolver) setupIPTable() error {
ltcpaddr := r.tcpListen.Addr().String()
resolverIP, ipPort, _ := net.SplitHostPort(laddr)
_, tcpPort, _ := net.SplitHostPort(ltcpaddr)
if nftables.Enabled() {
return r.setupNftablesNAT(ctx, laddr, ltcpaddr, resolverIP, ipPort, tcpPort)
}
return r.setupIptablesNAT(laddr, ltcpaddr, resolverIP, ipPort, tcpPort)
}
func (r *Resolver) setupIptablesNAT(laddr, ltcpaddr, resolverIP, ipPort, tcpPort string) error {
rules := [][]string{
{"-t", "nat", "-I", outputChain, "-d", resolverIP, "-p", "udp", "--dport", dnsPort, "-j", "DNAT", "--to-destination", laddr},
{"-t", "nat", "-I", postroutingChain, "-s", resolverIP, "-p", "udp", "--sport", ipPort, "-j", "SNAT", "--to-source", ":" + dnsPort},
@@ -81,3 +91,40 @@ func (r *Resolver) setupIPTable() error {
}
return setupErr
}
func (r *Resolver) setupNftablesNAT(ctx context.Context, laddr, ltcpaddr, resolverIP, ipPort, tcpPort string) error {
table, err := nftables.NewTable(nftables.IPv4, "docker-dns")
if err != nil {
return err
}
dnatChain, err := table.BaseChain("dns-dnat", nftables.BaseChainTypeNAT, nftables.BaseChainHookOutput, nftables.BaseChainPriorityDstNAT)
if err != nil {
return err
}
if err := dnatChain.AppendRule(0, "ip daddr %s udp dport %s counter dnat to %s", resolverIP, dnsPort, laddr); err != nil {
return err
}
if err := dnatChain.AppendRule(0, "ip daddr %s tcp dport %s counter dnat to %s", resolverIP, dnsPort, ltcpaddr); err != nil {
return err
}
snatChain, err := table.BaseChain("dns-snat", nftables.BaseChainTypeNAT, nftables.BaseChainHookPostrouting, nftables.BaseChainPrioritySrcNAT)
if err != nil {
return err
}
if err := snatChain.AppendRule(0, "ip saddr %s udp sport %s counter snat to :%s", resolverIP, ipPort, dnsPort); err != nil {
return err
}
if err := snatChain.AppendRule(0, "ip saddr %s tcp sport %s counter snat to :%s", resolverIP, tcpPort, dnsPort); err != nil {
return err
}
var setupErr error
if err := r.backend.ExecFunc(func() {
setupErr = table.Apply(ctx)
}); err != nil {
return err
}
return setupErr
}

View File

@@ -2,6 +2,8 @@
package libnetwork
func (r *Resolver) setupIPTable() error {
import "context"
func (r *Resolver) setupNAT(context.Context) error {
return nil
}

View File

@@ -57,7 +57,7 @@ type service struct {
// associated with it. At stable state the endpoint ID expected is 1
// but during transition and service change it is possible to have
// temporary more than 1
ipToEndpoint setmatrix.SetMatrix[string]
ipToEndpoint setmatrix.SetMatrix[string, string]
deleted bool

View File

@@ -1,2 +1 @@
-N DOCKER-USER
-A DOCKER-USER -j RETURN

View File

@@ -1,2 +1 @@
-N DOCKER-USER
-A DOCKER-USER -j RETURN

View File

@@ -1,2 +1 @@
-N DOCKER-USER
-A DOCKER-USER -j RETURN

View File

@@ -1,2 +1 @@
-N DOCKER-USER
-A DOCKER-USER -j RETURN