vendor: github.com/moby/buildkit v0.24.0-rc1

full diff: https://github.com/moby/buildkit/compare/v0.23.2...v0.24.0-rc1

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 229a29649f)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
This commit is contained in:
Sebastiaan van Stijn
2025-08-20 16:30:50 +02:00
parent 314a8f8f0b
commit b13b91c54b
49 changed files with 1643 additions and 287 deletions

View File

@@ -19,9 +19,6 @@ if [[ "${buildkit_ref}" == *-*-* ]]; then
buildkit_ref=$(curl -s "https://api.github.com/repos/${buildkit_repo}/commits/${buildkit_ref}" | jq -r .sha)
fi
# FIXME(thaJeztah) temporarily overriding version to use for tests; remove with the next release of buildkit; see https://github.com/moby/moby/issues/50389
buildkit_ref=dd2b4e18663c58ac3762d7b60b2c3301f71d5fa9
cat << EOF
BUILDKIT_REPO=$buildkit_repo
BUILDKIT_REF=$buildkit_ref

View File

@@ -62,7 +62,7 @@ require (
github.com/miekg/dns v1.1.66
github.com/mistifyio/go-zfs/v3 v3.0.1
github.com/mitchellh/copystructure v1.2.0
github.com/moby/buildkit v0.23.2 // FIXME(thaJeztah): remove override from hack/buildkit-ref when updating.
github.com/moby/buildkit v0.24.0-rc1
github.com/moby/docker-image-spec v1.3.1
github.com/moby/go-archive v0.1.0
github.com/moby/ipvs v1.1.0

View File

@@ -175,8 +175,8 @@ github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/cli v28.2.2+incompatible h1:qzx5BNUDFqlvyq4AHzdNB7gSyVTmU4cgsyN9SdInc1A=
github.com/docker/cli v28.2.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v28.3.3+incompatible h1:fp9ZHAr1WWPGdIWBM1b3zLtgCF+83gRdVMTJsUeiyAo=
github.com/docker/cli v28.3.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
@@ -383,8 +383,8 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs=
github.com/moby/buildkit v0.23.2 h1:gt/dkfcpgTXKx+B9I310kV767hhVqTvEyxGgI3mqsGQ=
github.com/moby/buildkit v0.23.2/go.mod h1:iEjAfPQKIuO+8y6OcInInvzqTMiKMbb2RdJz1K/95a0=
github.com/moby/buildkit v0.24.0-rc1 h1:taA+MPeYWtGyRQ1SGbdTHVk5khWKFR7f9WI2coW/Ggs=
github.com/moby/buildkit v0.24.0-rc1/go.mod h1:4qovICAdR2H4C7+EGMRva5zgHW1gyhT4/flHI7F5F9k=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ=

View File

@@ -3,7 +3,6 @@ package contenthash
import (
"bytes"
"context"
"crypto/sha256"
"io"
"os"
"path"
@@ -18,6 +17,7 @@ import (
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/util/cachedigest"
"github.com/moby/locker"
"github.com/moby/patternmatcher"
digest "github.com/opencontainers/go-digest"
@@ -450,15 +450,15 @@ func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable,
return digest.Digest(includedPaths[0].record.Digest), nil
}
digester := digest.Canonical.Digester()
h := cachedigest.NewHash(cachedigest.TypeFileList)
for i, w := range includedPaths {
if i != 0 {
digester.Hash().Write([]byte{0})
h.Write([]byte{0})
}
digester.Hash().Write([]byte(path.Base(w.path)))
digester.Hash().Write([]byte(w.record.Digest))
h.Write([]byte(path.Base(w.path)))
h.Write([]byte(w.record.Digest))
}
return digester.Digest(), nil
return h.Sum(), nil
}
func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, opts ChecksumOpts) ([]*includedPath, error) {
@@ -881,7 +881,7 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node[*CacheRe
switch cr.Type {
case CacheRecordTypeDir:
h := sha256.New()
h := cachedigest.NewHash(cachedigest.TypeFileList)
next := append(k, 0)
iter := root.Iterator()
iter.SeekLowerBound(append(slices.Clone(next), 0))
@@ -906,7 +906,7 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node[*CacheRe
}
subk, _, ok = iter.Next()
}
dgst = digest.NewDigest(digest.SHA256, h)
dgst = h.Sum()
default:
p := convertKeyToPath(bytes.TrimSuffix(k, []byte{0}))

View File

@@ -2,12 +2,13 @@ package contenthash
import (
"archive/tar"
"crypto/sha256"
"encoding/hex"
"hash"
"os"
"path/filepath"
"time"
"github.com/moby/buildkit/util/cachedigest"
"github.com/pkg/errors"
fstypes "github.com/tonistiigi/fsutil/types"
)
@@ -62,13 +63,14 @@ func NewFromStat(stat *fstypes.Stat) (hash.Hash, error) {
}
}
// fmt.Printf("hdr: %#v\n", hdr)
tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()}
h := cachedigest.NewHash(cachedigest.TypeFile)
tsh := &tarsumHash{hdr: hdr, Hash: h}
tsh.Reset() // initialize header
return tsh, nil
}
type tarsumHash struct {
hash.Hash
*cachedigest.Hash
hdr *tar.Header
}
@@ -79,6 +81,19 @@ func (tsh *tarsumHash) Reset() {
WriteV1TarsumHeaders(tsh.hdr, tsh.Hash)
}
func (tsh *tarsumHash) Write(p []byte) (n int, err error) {
n, err = tsh.WriteNoDebug(p)
if n > 0 {
tsh.hdr.Size += int64(n)
}
return n, err
}
func (tsh *tarsumHash) Sum(_ []byte) []byte {
b, _ := hex.DecodeString(tsh.Hash.Sum().Hex())
return b
}
type statInfo struct {
*fstypes.Stat
}

View File

@@ -2,6 +2,7 @@ package cache
import (
"context"
stderrors "errors"
"fmt"
"maps"
"os"
@@ -19,7 +20,6 @@ import (
"github.com/containerd/containerd/v2/core/snapshots"
"github.com/containerd/containerd/v2/pkg/labels"
cerrdefs "github.com/containerd/errdefs"
"github.com/hashicorp/go-multierror"
"github.com/moby/buildkit/cache/config"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
@@ -146,12 +146,15 @@ type diffParents struct {
}
// caller must hold cacheManager.mu
func (p parentRefs) release(ctx context.Context) (rerr error) {
func (p parentRefs) release(ctx context.Context) error {
var errs []error
switch {
case p.layerParent != nil:
p.layerParent.mu.Lock()
defer p.layerParent.mu.Unlock()
rerr = p.layerParent.release(ctx)
if err := p.layerParent.release(ctx); err != nil {
errs = append(errs, err)
}
case len(p.mergeParents) > 0:
for i, parent := range p.mergeParents {
if parent == nil {
@@ -159,7 +162,7 @@ func (p parentRefs) release(ctx context.Context) (rerr error) {
}
parent.mu.Lock()
if err := parent.release(ctx); err != nil {
rerr = multierror.Append(rerr, err).ErrorOrNil()
errs = append(errs, err)
} else {
p.mergeParents[i] = nil
}
@@ -170,7 +173,7 @@ func (p parentRefs) release(ctx context.Context) (rerr error) {
p.diffParents.lower.mu.Lock()
defer p.diffParents.lower.mu.Unlock()
if err := p.diffParents.lower.release(ctx); err != nil {
rerr = multierror.Append(rerr, err).ErrorOrNil()
errs = append(errs, err)
} else {
p.diffParents.lower = nil
}
@@ -179,14 +182,14 @@ func (p parentRefs) release(ctx context.Context) (rerr error) {
p.diffParents.upper.mu.Lock()
defer p.diffParents.upper.mu.Unlock()
if err := p.diffParents.upper.release(ctx); err != nil {
rerr = multierror.Append(rerr, err).ErrorOrNil()
errs = append(errs, err)
} else {
p.diffParents.upper = nil
}
}
}
return rerr
return stderrors.Join(errs...)
}
func (p parentRefs) cloneParentRefs() parentRefs {
@@ -598,18 +601,19 @@ func (cr *cacheRecord) layerDigestChain() []digest.Digest {
type RefList []ImmutableRef
func (l RefList) Release(ctx context.Context) (rerr error) {
func (l RefList) Release(ctx context.Context) error {
var errs []error
for i, r := range l {
if r == nil {
continue
}
if err := r.Release(ctx); err != nil {
rerr = multierror.Append(rerr, err).ErrorOrNil()
errs = append(errs, err)
} else {
l[i] = nil
}
}
return rerr
return stderrors.Join(errs...)
}
func (sr *immutableRef) LayerChain() RefList {

View File

@@ -114,7 +114,16 @@ func getAvailableBlobs(ctx context.Context, cs content.Store, chain *solver.Remo
}
var descs []ocispecs.Descriptor
if err := walkBlob(ctx, cs, target, func(desc ocispecs.Descriptor) bool {
descs = append(descs, desc)
// Nothing prevents this function from being called multiple times for the same descriptor.
// So we need to make sure we don't add the same descriptor again.
// Looping over the list is preferable:
// 1. to avoid using a map, which don't preserve the order of descriptors,
// 2. descs will have a length the number of compression variants for a blob, which is usually very small
if !slices.ContainsFunc(descs, func(d ocispecs.Descriptor) bool {
return d.Digest == desc.Digest
}) {
descs = append(descs, desc)
}
return true
}); err != nil {
bklog.G(ctx).WithError(err).Warn("failed to walk variant blob") // is not a critical error at this moment.

View File

@@ -106,7 +106,12 @@ func (cs *cacheKeyStorage) Exists(id string) bool {
return ok
}
func (cs *cacheKeyStorage) Walk(func(id string) error) error {
func (cs *cacheKeyStorage) Walk(cb func(id string) error) error {
for id := range cs.byID {
if err := cb(id); err != nil {
return err
}
}
return nil
}
@@ -142,6 +147,26 @@ func (cs *cacheKeyStorage) Release(resultID string) error {
func (cs *cacheKeyStorage) AddLink(id string, link solver.CacheInfoLink, target string) error {
return nil
}
func (cs *cacheKeyStorage) WalkLinksAll(id string, fn func(id string, link solver.CacheInfoLink) error) error {
it, ok := cs.byID[id]
if !ok {
return nil
}
for nl, ids := range it.links {
for _, id2 := range ids {
if err := fn(id2, solver.CacheInfoLink{
Input: solver.Index(nl.input),
Selector: digest.Digest(nl.selector),
Digest: nl.dgst,
}); err != nil {
return err
}
}
}
return nil
}
func (cs *cacheKeyStorage) WalkLinks(id string, link solver.CacheInfoLink, fn func(id string) error) error {
it, ok := cs.byID[id]
if !ok {

View File

@@ -351,8 +351,7 @@ func (s State) GetEnv(ctx context.Context, key string, co ...ConstraintsOpt) (st
return v, ok, nil
}
// Env returns a new [State] with the provided environment variable set.
// See [Env]
// Env returns the current environment variables for the state.
func (s State) Env(ctx context.Context, co ...ConstraintsOpt) (*EnvList, error) {
c := &Constraints{}
for _, f := range co {

View File

@@ -55,10 +55,11 @@ type SolveOpt struct {
}
type ExportEntry struct {
Type string
Attrs map[string]string
Output filesync.FileOutputFunc // for ExporterOCI and ExporterDocker
OutputDir string // for ExporterLocal
Type string
Attrs map[string]string
Output filesync.FileOutputFunc // for ExporterOCI and ExporterDocker
OutputDir string // for ExporterLocal
OutputStore content.Store
}
type CacheOptionsEntry struct {
@@ -154,26 +155,28 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
var syncTargets []filesync.FSSyncTarget
for exID, ex := range opt.Exports {
var supportFile bool
var supportDir bool
var supportFile, supportDir, supportStore bool
switch ex.Type {
case ExporterLocal:
supportDir = true
case ExporterTar:
supportFile = true
case ExporterOCI, ExporterDocker:
supportDir = ex.OutputDir != ""
supportFile = ex.Output != nil
}
if supportFile && supportDir {
return nil, errors.Errorf("both file and directory output is not supported by %s exporter", ex.Type)
supportStore = ex.OutputStore != nil || ex.OutputDir != ""
if supportFile && supportStore {
return nil, errors.Errorf("both file and store output is not supported by %s exporter", ex.Type)
}
}
if !supportFile && ex.Output != nil {
return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
}
if !supportDir && ex.OutputDir != "" {
if !supportDir && !supportStore && ex.OutputDir != "" {
return nil, errors.Errorf("output directory is not supported by %s exporter", ex.Type)
}
if !supportStore && ex.OutputStore != nil {
return nil, errors.Errorf("output store is not supported by %s exporter", ex.Type)
}
if supportFile {
if ex.Output == nil {
return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type)
@@ -184,20 +187,27 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
if ex.OutputDir == "" {
return nil, errors.Errorf("output directory is required for %s exporter", ex.Type)
}
switch ex.Type {
case ExporterOCI, ExporterDocker:
syncTargets = append(syncTargets, filesync.WithFSSyncDir(exID, ex.OutputDir))
}
if supportStore {
store := ex.OutputStore
if store == nil {
if err := os.MkdirAll(ex.OutputDir, 0755); err != nil {
return nil, err
}
cs, err := contentlocal.NewStore(ex.OutputDir)
store, err = contentlocal.NewStore(ex.OutputDir)
if err != nil {
return nil, err
}
contentStores["export"] = cs
storesToUpdate = append(storesToUpdate, ex.OutputDir)
default:
syncTargets = append(syncTargets, filesync.WithFSSyncDir(exID, ex.OutputDir))
}
// TODO: this should be dependent on the exporter id (to allow multiple oci exporters)
storeName := "export"
if _, ok := contentStores[storeName]; ok {
return nil, errors.Errorf("oci store key %q already exists", storeName)
}
contentStores[storeName] = store
}
}

View File

@@ -12,7 +12,7 @@ type Config struct {
// Root is the path to a directory where buildkit will store persistent data
Root string `toml:"root"`
// Entitlements e.g. security.insecure, network.host
// Entitlements e.g. security.insecure, network.host, device
Entitlements []string `toml:"insecure-entitlements"`
// LogFormat is the format of the logs. It can be "json" or "text".

View File

@@ -2,6 +2,7 @@ package control
import (
"context"
stderrors "errors"
"fmt"
"runtime/trace"
"strconv"
@@ -13,7 +14,6 @@ import (
"github.com/containerd/containerd/v2/core/content"
"github.com/containerd/containerd/v2/plugins/services/content/contentserver"
"github.com/distribution/reference"
"github.com/hashicorp/go-multierror"
"github.com/mitchellh/hashstructure/v2"
controlapi "github.com/moby/buildkit/api/services/control"
apitypes "github.com/moby/buildkit/api/types"
@@ -138,17 +138,20 @@ func NewController(opt Opt) (*Controller, error) {
}
func (c *Controller) Close() error {
rerr := c.opt.HistoryDB.Close()
var errs []error
if err := c.opt.HistoryDB.Close(); err != nil {
errs = append(errs, err)
}
if err := c.opt.WorkerController.Close(); err != nil {
rerr = multierror.Append(rerr, err)
errs = append(errs, err)
}
if err := c.opt.CacheStore.Close(); err != nil {
rerr = multierror.Append(rerr, err)
errs = append(errs, err)
}
if err := c.solver.Close(); err != nil {
rerr = multierror.Append(rerr, err)
errs = append(errs, err)
}
return rerr
return stderrors.Join(errs...)
}
func (c *Controller) Register(server *grpc.Server) {

View File

@@ -2,16 +2,27 @@ package oci
import (
"context"
"net/netip"
"os"
"path/filepath"
"github.com/docker/docker/libnetwork/resolvconf"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/flightcontrol"
"github.com/moby/buildkit/util/resolvconf"
"github.com/moby/sys/user"
"github.com/pkg/errors"
)
const (
// defaultPath is the default path to the resolv.conf that contains
// information to resolve DNS.
defaultPath = "/etc/resolv.conf"
// alternatePath is a path different from defaultPath, that may be used to
// resolve DNS.
alternatePath = "/run/systemd/resolve/resolv.conf"
)
var (
g flightcontrol.Group[struct{}]
notFirstRun bool
@@ -20,16 +31,28 @@ var (
// overridden by tests
var resolvconfPath = func(netMode pb.NetMode) string {
// The implementation of resolvconf.Path checks if systemd resolved is activated and chooses the internal
// resolv.conf (/run/systemd/resolve/resolv.conf) in such a case - see resolvconf_path.go of libnetwork.
// This, however, can be problematic, see https://github.com/moby/buildkit/issues/2404 and is not necessary
// in case the networking mode is set to host since the locally (127.0.0.53) running resolved daemon is
// accessible from inside a host networked container.
// For details of the implementation see https://github.com/moby/buildkit/pull/5207#discussion_r1705362230.
// Directly return /etc/resolv.conf if the networking mode is set to host
// since the locally (127.0.0.53) running resolved daemon is accessible
// from inside a host networked container. For details of the
// implementation see https://github.com/moby/buildkit/pull/5207#discussion_r1705362230.
if netMode == pb.NetMode_HOST {
return "/etc/resolv.conf"
return defaultPath
}
return resolvconf.Path()
// When /etc/resolv.conf contains 127.0.0.53 as the only nameserver, then
// it is assumed systemd-resolved manages DNS. Because inside the container
// 127.0.0.53 is not a valid DNS server, then return /run/systemd/resolve/resolv.conf
// which is the resolv.conf that systemd-resolved generates and manages.
// Otherwise, return /etc/resolv.conf.
rc, err := resolvconf.Load(defaultPath)
if err != nil {
return defaultPath
}
ns := rc.NameServers()
if len(ns) == 1 && ns[0] == netip.MustParseAddr("127.0.0.53") {
bklog.G(context.TODO()).Infof("detected 127.0.0.53 nameserver, assuming systemd-resolved, so using resolv.conf: %s", alternatePath)
return alternatePath
}
return defaultPath
}
type DNSConfig struct {
@@ -76,41 +99,40 @@ func GetResolvConf(ctx context.Context, stateDir string, idmap *user.IdentityMap
return struct{}{}, nil
}
dt, err := os.ReadFile(resolvconfPath(netMode))
rc, err := resolvconf.Load(resolvconfPath(netMode))
if err != nil && !errors.Is(err, os.ErrNotExist) {
return struct{}{}, errors.WithStack(err)
}
tmpPath := p + ".tmp"
if dns != nil {
var (
dnsNameservers = dns.Nameservers
dnsSearchDomains = dns.SearchDomains
dnsOptions = dns.Options
)
if len(dns.Nameservers) == 0 {
dnsNameservers = resolvconf.GetNameservers(dt, resolvconf.IP)
if len(dns.Nameservers) > 0 {
var ns []netip.Addr
for _, addr := range dns.Nameservers {
ipAddr, err := netip.ParseAddr(addr)
if err != nil {
return struct{}{}, errors.WithStack(errors.Wrap(err, "bad nameserver address"))
}
ns = append(ns, ipAddr)
}
rc.OverrideNameServers(ns)
}
if len(dns.SearchDomains) == 0 {
dnsSearchDomains = resolvconf.GetSearchDomains(dt)
if len(dns.SearchDomains) > 0 {
rc.OverrideSearch(dns.SearchDomains)
}
if len(dns.Options) == 0 {
dnsOptions = resolvconf.GetOptions(dt)
if len(dns.Options) > 0 {
rc.OverrideOptions(dns.Options)
}
f, err := resolvconf.Build(tmpPath, dnsNameservers, dnsSearchDomains, dnsOptions)
if err != nil {
return struct{}{}, errors.WithStack(err)
}
dt = f.Content
}
if netMode != pb.NetMode_HOST || len(resolvconf.GetNameservers(dt, resolvconf.IP)) == 0 {
f, err := resolvconf.FilterResolvDNS(dt, true)
if err != nil {
return struct{}{}, errors.WithStack(err)
}
dt = f.Content
if netMode != pb.NetMode_HOST || len(rc.NameServers()) == 0 {
rc.TransformForLegacyNw(true)
}
tmpPath := p + ".tmp"
dt, err := rc.Generate(false)
if err != nil {
return struct{}{}, errors.WithStack(err)
}
if err := os.WriteFile(tmpPath, dt, 0644); err != nil {
@@ -124,6 +146,7 @@ func GetResolvConf(ctx context.Context, stateDir string, idmap *user.IdentityMap
}
}
// TODO(thaJeztah): can we avoid the write -> chown -> rename?
if err := os.Rename(tmpPath, p); err != nil {
return struct{}{}, errors.WithStack(err)
}

View File

@@ -14,12 +14,12 @@ import (
"github.com/containerd/containerd/v2/pkg/oci"
cdseccomp "github.com/containerd/containerd/v2/pkg/seccomp"
"github.com/containerd/continuity/fs"
"github.com/docker/docker/profiles/seccomp"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver/llbsolver/cdidevices"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/entitlements/security"
"github.com/moby/profiles/seccomp"
"github.com/moby/sys/user"
specs "github.com/opencontainers/runtime-spec/specs-go"
selinux "github.com/opencontainers/selinux/go-selinux"

View File

@@ -29,6 +29,7 @@ import (
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/errutil"
"github.com/moby/buildkit/util/leaseutil"
"github.com/moby/buildkit/util/progress"
"github.com/moby/buildkit/util/push"
@@ -358,10 +359,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source
if err != nil {
var statusErr remoteserrors.ErrUnexpectedStatus
if errors.As(err, &statusErr) {
var dErr docker.Errors
if err1 := json.Unmarshal(statusErr.Body, &dErr); err1 == nil && len(dErr) > 0 {
err = &formattedDockerError{dErr: dErr}
}
err = errutil.WithDetails(err)
}
return nil, nil, errors.Wrapf(err, "failed to push %v", targetName)
}
@@ -550,36 +548,3 @@ func (d *descriptorReference) Descriptor() ocispecs.Descriptor {
func (d *descriptorReference) Release() error {
return d.release(context.TODO())
}
type formattedDockerError struct {
dErr docker.Errors
}
func (e *formattedDockerError) Error() string {
format := func(err error) string {
out := err.Error()
var dErr docker.Error
if errors.As(err, &dErr) {
if v, ok := dErr.Detail.(string); ok && v != "" {
out += " - " + v
}
}
return out
}
switch len(e.dErr) {
case 0:
return "<nil>"
case 1:
return format(e.dErr[0])
default:
msg := "errors:\n"
for _, err := range e.dErr {
msg += format(err) + "\n"
}
return msg
}
}
func (e *formattedDockerError) Unwrap() error {
return e.dErr
}

View File

@@ -1,10 +1,12 @@
package gitutil
// Package dfgitutil provides Dockerfile-specific utilities for git refs.
package dfgitutil
import (
"net/url"
"strings"
cerrdefs "github.com/containerd/errdefs"
"github.com/moby/buildkit/util/gitutil"
"github.com/pkg/errors"
)
@@ -53,7 +55,7 @@ func ParseGitRef(ref string) (*GitRef, error) {
res := &GitRef{}
var (
remote *GitURL
remote *gitutil.GitURL
err error
)
@@ -61,14 +63,14 @@ func ParseGitRef(ref string) (*GitRef, error) {
return nil, cerrdefs.ErrInvalidArgument
} else if strings.HasPrefix(ref, "github.com/") {
res.IndistinguishableFromLocal = true // Deprecated
remote = fromURL(&url.URL{
remote = gitutil.FromURL(&url.URL{
Scheme: "https",
Host: "github.com",
Path: strings.TrimPrefix(ref, "github.com/"),
})
} else {
remote, err = ParseURL(ref)
if errors.Is(err, ErrUnknownProtocol) {
remote, err = gitutil.ParseURL(ref)
if errors.Is(err, gitutil.ErrUnknownProtocol) {
return nil, err
}
if err != nil {
@@ -76,13 +78,13 @@ func ParseGitRef(ref string) (*GitRef, error) {
}
switch remote.Scheme {
case HTTPProtocol, GitProtocol:
case gitutil.HTTPProtocol, gitutil.GitProtocol:
res.UnencryptedTCP = true // Discouraged, but not deprecated
}
switch remote.Scheme {
// An HTTP(S) URL is considered to be a valid git ref only when it has the ".git[...]" suffix.
case HTTPProtocol, HTTPSProtocol:
case gitutil.HTTPProtocol, gitutil.HTTPSProtocol:
if !strings.HasSuffix(remote.Path, ".git") {
return nil, cerrdefs.ErrInvalidArgument
}

View File

@@ -21,10 +21,10 @@ import (
"github.com/containerd/platforms"
"github.com/distribution/reference"
"github.com/docker/go-connections/nat"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/client/llb/imagemetaresolver"
"github.com/moby/buildkit/client/llb/sourceresolver"
"github.com/moby/buildkit/frontend/dockerfile/dfgitutil"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/moby/buildkit/frontend/dockerfile/linter"
"github.com/moby/buildkit/frontend/dockerfile/parser"
@@ -36,7 +36,6 @@ import (
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/apicaps"
"github.com/moby/buildkit/util/gitutil"
"github.com/moby/buildkit/util/suggest"
"github.com/moby/buildkit/util/system"
dockerspec "github.com/moby/docker-image-spec/specs-go/v1"
@@ -407,7 +406,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
for _, d := range allDispatchStates.states {
d.commands = make([]command, len(d.stage.Commands))
for i, cmd := range d.stage.Commands {
newCmd, err := toCommand(cmd, allDispatchStates)
newCmd, err := toCommand(cmd, allDispatchStates, shlex)
if err != nil {
return nil, err
}
@@ -484,8 +483,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
d.dispatched = true
d.state = *st
if img != nil {
// timestamps are inherited as-is, regardless to SOURCE_DATE_EPOCH
// https://github.com/moby/buildkit/issues/4614
img.Created = nil
d.image = *img
if img.Architecture != "" && img.OS != "" {
d.platform = &ocispecs.Platform{
@@ -516,11 +514,13 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
if err != nil {
return err
}
if img != nil {
d.image = *img
} else {
d.image = emptyImage(platformOpt.targetPlatform)
if img == nil {
imgp := emptyImage(*platform)
img = &imgp
}
d.baseImg = cloneX(img) // immutable
img.Created = nil
d.image = *img
d.state = st.Platform(*platform)
d.platform = platform
return nil
@@ -628,7 +628,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
}
if len(onbuilds) > 0 {
if b, err := initOnBuildTriggers(d, onbuilds, allDispatchStates); err != nil {
if b, err := initOnBuildTriggers(d, onbuilds, allDispatchStates, shlex); err != nil {
return nil, parser.SetLocation(err, d.stage.Location)
} else if b {
newDeps = true
@@ -815,10 +815,17 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
return target, nil
}
func toCommand(ic instructions.Command, allDispatchStates *dispatchStates) (command, error) {
func toCommand(ic instructions.Command, allDispatchStates *dispatchStates, shlex *shell.Lex) (command, error) {
cmd := command{Command: ic}
if c, ok := ic.(*instructions.CopyCommand); ok {
if c.From != "" {
res, err := shlex.ProcessWordWithMatches(c.From, shell.EnvsFromSlice(nil))
if err != nil {
return command{}, err
}
if res.Result != c.From {
return command{}, errors.Errorf("variable expansion is not supported for --from, define a new stage with FROM using ARG from global scope as a workaround")
}
var stn *dispatchState
index, err := strconv.Atoi(c.From)
if err != nil {
@@ -971,7 +978,7 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error {
case *instructions.HealthCheckCommand:
err = dispatchHealthcheck(d, c, opt.lint)
case *instructions.ExposeCommand:
err = dispatchExpose(d, c, opt.shlex)
err = dispatchExpose(d, c, &opt)
case *instructions.UserCommand:
err = dispatchUser(d, c, true)
case *instructions.VolumeCommand:
@@ -1148,7 +1155,7 @@ type command struct {
// initOnBuildTriggers initializes the onbuild triggers and creates the commands and dependecies for them.
// It returns true if there were any new dependencies added that need to be resolved.
func initOnBuildTriggers(d *dispatchState, triggers []string, allDispatchStates *dispatchStates) (bool, error) {
func initOnBuildTriggers(d *dispatchState, triggers []string, allDispatchStates *dispatchStates, shlex *shell.Lex) (bool, error) {
hasNewDeps := false
commands := make([]command, 0, len(triggers))
@@ -1167,7 +1174,7 @@ func initOnBuildTriggers(d *dispatchState, triggers []string, allDispatchStates
if err != nil {
return false, err
}
cmd, err := toCommand(ic, allDispatchStates)
cmd, err := toCommand(ic, allDispatchStates, shlex)
if err != nil {
return false, err
}
@@ -1500,7 +1507,7 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error {
for _, src := range cfg.params.SourcePaths {
commitMessage.WriteString(" " + src)
gitRef, gitRefErr := gitutil.ParseGitRef(src)
gitRef, gitRefErr := dfgitutil.ParseGitRef(src)
if gitRefErr == nil && !gitRef.IndistinguishableFromLocal {
if !cfg.isAddCommand {
return errors.New("source can't be a git ref for COPY")
@@ -1772,33 +1779,6 @@ func dispatchHealthcheck(d *dispatchState, c *instructions.HealthCheckCommand, l
return commitToHistory(&d.image, fmt.Sprintf("HEALTHCHECK %q", d.image.Config.Healthcheck), false, nil, d.epoch)
}
func dispatchExpose(d *dispatchState, c *instructions.ExposeCommand, shlex *shell.Lex) error {
ports := []string{}
env := getEnv(d.state)
for _, p := range c.Ports {
ps, err := shlex.ProcessWords(p, env)
if err != nil {
return err
}
ports = append(ports, ps...)
}
c.Ports = ports
ps, _, err := nat.ParsePortSpecs(c.Ports)
if err != nil {
return err
}
if d.image.Config.ExposedPorts == nil {
d.image.Config.ExposedPorts = make(map[string]struct{})
}
for p := range ps {
d.image.Config.ExposedPorts[string(p)] = struct{}{}
}
return commitToHistory(&d.image, fmt.Sprintf("EXPOSE %v", ps), false, nil, d.epoch)
}
func dispatchUser(d *dispatchState, c *instructions.UserCommand, commit bool) error {
d.state = d.state.User(c.User)
d.image.Config.User = c.User
@@ -2288,7 +2268,7 @@ func isHTTPSource(src string) bool {
func isGitSource(src string) bool {
// https://github.com/ORG/REPO.git is a git source, not an http source
if gitRef, gitErr := gitutil.ParseGitRef(src); gitRef != nil && gitErr == nil {
if gitRef, gitErr := dfgitutil.ParseGitRef(src); gitRef != nil && gitErr == nil {
return true
}
return false

View File

@@ -0,0 +1,227 @@
package dockerfile2llb
import (
"fmt"
"net"
"strconv"
"strings"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/moby/buildkit/frontend/dockerfile/linter"
"github.com/moby/buildkit/frontend/dockerfile/parser"
"github.com/pkg/errors"
)
func dispatchExpose(d *dispatchState, c *instructions.ExposeCommand, opt *dispatchOpt) error {
ports := []string{}
env := getEnv(d.state)
for _, p := range c.Ports {
ps, err := opt.shlex.ProcessWords(p, env)
if err != nil {
return err
}
ports = append(ports, ps...)
}
c.Ports = ports
ps := newPortSpecs(
withLocation(c.Location()),
withLint(opt.lint),
)
psp, err := ps.parsePorts(c.Ports)
if err != nil {
return err
}
if d.image.Config.ExposedPorts == nil {
d.image.Config.ExposedPorts = make(map[string]struct{})
}
for _, p := range psp {
d.image.Config.ExposedPorts[p] = struct{}{}
}
return commitToHistory(&d.image, fmt.Sprintf("EXPOSE %v", ps), false, nil, d.epoch)
}
type portSpecs struct {
location []parser.Range
lint *linter.Linter
}
type portSpecsOption func(ps *portSpecs)
func withLocation(location []parser.Range) portSpecsOption {
return func(ps *portSpecs) {
ps.location = location
}
}
func withLint(lint *linter.Linter) portSpecsOption {
return func(ps *portSpecs) {
ps.lint = lint
}
}
func newPortSpecs(opts ...portSpecsOption) *portSpecs {
ps := &portSpecs{}
for _, opt := range opts {
opt(ps)
}
return ps
}
// parsePorts receives port specs in the format of [ip:]public:private/proto
// and returns them as a list of "port/proto".
func (ps *portSpecs) parsePorts(ports []string) (exposedPorts []string, _ error) {
for _, p := range ports {
portProtos, err := ps.parsePort(p)
if err != nil {
return nil, err
}
exposedPorts = append(exposedPorts, portProtos...)
}
return exposedPorts, nil
}
// parsePort parses a single port specification of the form
// "[ip:][hostPort:]containerPort[/proto]" into a slice of
// "<portnum>/<proto>" entries, one per port in the container-port range.
// IP addresses and host ports are validated but do not appear in the result;
// when a linter is configured they produce lint findings instead of errors.
func (ps *portSpecs) parsePort(rawPort string) (portProto []string, _ error) {
	ip, hostPort, containerPort := ps.splitParts(rawPort)
	proto, containerPort, err := ps.splitProtoPort(containerPort)
	if err != nil {
		return nil, errors.Wrapf(err, "invalid port: %q", rawPort)
	}
	// Lint findings are advisory only; parsing continues regardless.
	if ps.lint != nil {
		if proto != strings.ToLower(proto) {
			msg := linter.RuleExposeProtoCasing.Format(rawPort)
			ps.lint.Run(&linter.RuleExposeProtoCasing, ps.location, msg)
		}
		if ip != "" || hostPort != "" {
			msg := linter.RuleExposeInvalidFormat.Format(rawPort)
			ps.lint.Run(&linter.RuleExposeInvalidFormat, ps.location, msg)
		}
	}
	// TODO(thaJeztah): mapping IP-addresses should not be allowed for EXPOSE; see https://github.com/moby/buildkit/issues/2173
	if ip != "" && ip[0] == '[' {
		// Strip [] from IPV6 addresses
		rawIP, _, err := net.SplitHostPort(ip + ":")
		if err != nil {
			return nil, errors.Wrapf(err, "invalid IP address %v", ip)
		}
		ip = rawIP
	}
	if ip != "" && net.ParseIP(ip) == nil {
		return nil, errors.New("invalid IP address: " + ip)
	}
	startPort, endPort, err := ps.parsePortRange(containerPort)
	if err != nil {
		return nil, errors.New("invalid containerPort: " + containerPort)
	}
	// TODO(thaJeztah): mapping host-ports should not be allowed for EXPOSE; see https://github.com/moby/buildkit/issues/2173
	if hostPort != "" {
		startHostPort, endHostPort, err := ps.parsePortRange(hostPort)
		if err != nil {
			return nil, errors.New("invalid hostPort: " + hostPort)
		}
		if (endPort - startPort) != (endHostPort - startHostPort) {
			// Allow host port range iff containerPort is not a range.
			// In this case, use the host port range as the dynamic
			// host port range to allocate into.
			if endPort != startPort {
				return nil, errors.Errorf("invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort)
			}
		}
	}
	// Expand the container-port range into individual "port/proto" entries;
	// the protocol is normalized to lowercase in the result.
	count := endPort - startPort + 1
	ports := make([]string, 0, count)
	for i := range count {
		ports = append(ports, strconv.Itoa(startPort+i)+"/"+strings.ToLower(proto))
	}
	return ports, nil
}
// parsePortRange parses ports as either a single port number ("8000") or an
// inclusive range ("8000-9000") and returns the start and end values. A
// single port (or a degenerate range like "80-80") yields start == end.
func (ps *portSpecs) parsePortRange(ports string) (int, int, error) {
	if len(ports) == 0 {
		return 0, 0, errors.New("empty string specified for ports")
	}
	first, last, isRange := strings.Cut(ports, "-")
	lo, err := ps.parsePortNumber(first)
	if err != nil {
		return 0, 0, errors.Wrapf(err, "invalid start port '%s'", first)
	}
	if !isRange || first == last {
		return lo, lo, nil
	}
	hi, err := ps.parsePortNumber(last)
	if err != nil {
		return 0, 0, errors.Wrapf(err, "invalid end port '%s'", last)
	}
	if hi < lo {
		return 0, 0, errors.New("invalid port range: " + ports)
	}
	return lo, hi, nil
}
// parsePortNumber parses rawPort into an int, unwrapping strconv errors
// and returning a single "out of range" error for any value outside 0-65535.
// Fix: the range in the comment and error message had been garbled to
// "065535" (a lost dash); restored as an ASCII "0-65535".
func (ps *portSpecs) parsePortNumber(rawPort string) (int, error) {
	if rawPort == "" {
		return 0, errors.New("value is empty")
	}
	port, err := strconv.ParseInt(rawPort, 10, 0)
	if err != nil {
		// Unwrap *strconv.NumError so callers see only the underlying
		// cause (e.g. strconv.ErrSyntax) without the function prefix.
		var numErr *strconv.NumError
		if errors.As(err, &numErr) {
			err = numErr.Err
		}
		return 0, err
	}
	if port < 0 || port > 65535 {
		return 0, errors.New("value out of range (0-65535)")
	}
	return int(port), nil
}
// splitProtoPort splits rawPort, formatted as "<portnum>[/<proto>]" or
// "<startport-endport>[/<proto>]", into its protocol and port(-range) parts.
// An omitted protocol defaults to "tcp"; anything other than tcp, udp, or
// sctp (in any casing) is rejected. The protocol is returned in its original
// casing so the caller can lint-check it.
func (ps *portSpecs) splitProtoPort(rawPort string) (string, string, error) {
	port, proto, _ := strings.Cut(rawPort, "/")
	if port == "" {
		return "", "", errors.New("no port specified")
	}
	lowered := strings.ToLower(proto)
	if lowered == "" {
		return "tcp", port, nil
	}
	if lowered == "tcp" || lowered == "udp" || lowered == "sctp" {
		return proto, port, nil
	}
	return "", "", errors.New("invalid proto: " + proto)
}
// splitParts splits rawport on ":" into its optional host-IP, optional
// host-port, and container-port components. When more than three segments
// are present (IPv6 addresses contain colons), everything before the last
// two segments is treated as the host IP.
func (ps *portSpecs) splitParts(rawport string) (string, string, string) {
	segments := strings.Split(rawport, ":")
	n := len(segments)
	if n == 1 {
		return "", "", segments[0]
	}
	if n == 2 {
		return "", segments[0], segments[1]
	}
	if n == 3 {
		return segments[0], segments[1], segments[2]
	}
	return strings.Join(segments[:n-2], ":"), segments[n-2], segments[n-1]
}

View File

@@ -174,4 +174,22 @@ var (
},
Experimental: true,
}
RuleExposeProtoCasing = LinterRule[func(string) string]{
Name: "ExposeProtoCasing",
Description: "Protocol in EXPOSE instruction should be lowercase",
URL: "https://docs.docker.com/go/dockerfile/rule/expose-proto-casing/",
Format: func(port string) string {
return fmt.Sprintf("Defined protocol '%s' in EXPOSE instruction should be lowercase", port)
},
}
RuleExposeInvalidFormat = LinterRule[func(string) string]{
Name: "ExposeInvalidFormat",
Description: "IP address and host-port mapping should not be used in EXPOSE instruction. This will become an error in a future release",
URL: "https://docs.docker.com/go/dockerfile/rule/expose-invalid-format/",
Format: func(port string) string {
return fmt.Sprintf("EXPOSE instruction should not define an IP address or host-port mapping, found '%s'", port)
},
// TODO(crazy-max): deprecate this rule in the future and error out instead
// Deprecated: true,
}
)

View File

@@ -9,9 +9,9 @@ import (
"strconv"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/frontend/dockerfile/dfgitutil"
"github.com/moby/buildkit/frontend/gateway/client"
gwpb "github.com/moby/buildkit/frontend/gateway/pb"
"github.com/moby/buildkit/util/gitutil"
"github.com/pkg/errors"
)
@@ -141,7 +141,7 @@ func (bc *Client) initContext(ctx context.Context) (*buildContext, error) {
}
func DetectGitContext(ref string, keepGit bool) (*llb.State, bool) {
g, err := gitutil.ParseGitRef(ref)
g, err := dfgitutil.ParseGitRef(ref)
if err != nil {
return nil, false
}

View File

@@ -2,6 +2,7 @@ package snapshot
import (
"context"
stderrors "errors"
gofs "io/fs"
"os"
"path/filepath"
@@ -14,7 +15,6 @@ import (
"github.com/containerd/containerd/v2/plugins/snapshots/overlay/overlayutils"
"github.com/containerd/continuity/fs"
"github.com/containerd/continuity/sysx"
"github.com/hashicorp/go-multierror"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/leaseutil"
@@ -34,7 +34,7 @@ func (sn *mergeSnapshotter) diffApply(ctx context.Context, dest Mountable, diffs
defer func() {
releaseErr := a.Release()
if releaseErr != nil {
rerr = multierror.Append(rerr, errors.Wrapf(releaseErr, "failed to release applier")).ErrorOrNil()
rerr = stderrors.Join(rerr, errors.Wrapf(releaseErr, "failed to release applier"))
}
}()
@@ -84,7 +84,7 @@ func (sn *mergeSnapshotter) diffApply(ctx context.Context, dest Mountable, diffs
return snapshots.Usage{}, errors.Wrapf(err, "failed to create differ")
}
defer func() {
rerr = multierror.Append(rerr, d.Release()).ErrorOrNil()
rerr = stderrors.Join(rerr, d.Release())
}()
if err := d.HandleChanges(ctx, a.Apply); err != nil {
return snapshots.Usage{}, errors.Wrapf(err, "failed to handle changes")
@@ -146,7 +146,7 @@ func applierFor(dest Mountable, tryCrossSnapshotLink, userxattr bool) (_ *applie
}
defer func() {
if rerr != nil {
rerr = multierror.Append(rerr, a.Release()).ErrorOrNil()
rerr = stderrors.Join(rerr, a.Release())
}
}()
if tryCrossSnapshotLink {
@@ -191,7 +191,7 @@ func applierFor(dest Mountable, tryCrossSnapshotLink, userxattr bool) (_ *applie
prevRelease := a.release
a.release = func() error {
err := mnter.Unmount()
return multierror.Append(err, prevRelease()).ErrorOrNil()
return stderrors.Join(err, prevRelease())
}
}
@@ -523,7 +523,7 @@ func differFor(lowerMntable, upperMntable Mountable) (_ *differ, rerr error) {
}
defer func() {
if rerr != nil {
rerr = multierror.Append(rerr, d.Release()).ErrorOrNil()
rerr = stderrors.Join(rerr, d.Release())
}
}()
@@ -541,8 +541,7 @@ func differFor(lowerMntable, upperMntable Mountable) (_ *differ, rerr error) {
d.lowerRoot = root
lowerMnts = mnts
d.releaseLower = func() error {
err := mounter.Unmount()
return multierror.Append(err, release()).ErrorOrNil()
return stderrors.Join(mounter.Unmount(), release())
}
}
@@ -560,8 +559,7 @@ func differFor(lowerMntable, upperMntable Mountable) (_ *differ, rerr error) {
d.upperRoot = root
upperMnts = mnts
d.releaseUpper = func() error {
err := mounter.Unmount()
return multierror.Append(err, release()).ErrorOrNil()
return stderrors.Join(mounter.Unmount(), release())
}
}
@@ -779,7 +777,7 @@ func (d *differ) Release() error {
}
}
if d.releaseUpper != nil {
err = multierror.Append(err, d.releaseUpper()).ErrorOrNil()
err = stderrors.Join(err, d.releaseUpper())
if err == nil {
d.releaseUpper = nil
}

View File

@@ -342,6 +342,49 @@ func (s *Store) AddLink(id string, link solver.CacheInfoLink, target string) err
})
}
func (s *Store) WalkLinksAll(id string, fn func(id string, link solver.CacheInfoLink) error) error {
type linkEntry struct {
id string
link solver.CacheInfoLink
}
var links []linkEntry
if err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(linksBucket))
if b == nil {
return nil
}
b = b.Bucket([]byte(id))
if b == nil {
return nil
}
return b.ForEach(func(k, v []byte) error {
parts := bytes.Split(k, []byte("@"))
if len(parts) != 2 {
return errors.Errorf("invalid key %s", k)
}
var link solver.CacheInfoLink
if err := json.Unmarshal(parts[0], &link); err != nil {
return err
}
// make digest relative to output as not all backends store output separately
link.Digest = digest.FromBytes(fmt.Appendf(nil, "%s@%d", link.Digest, link.Output))
links = append(links, linkEntry{
id: string(parts[1]),
link: link,
})
return nil
})
}); err != nil {
return err
}
for _, l := range links {
if err := fn(l.id, l.link); err != nil {
return err
}
}
return nil
}
func (s *Store) WalkLinks(id string, link solver.CacheInfoLink, fn func(id string) error) error {
var links []string
if err := s.db.View(func(tx *bolt.Tx) error {

View File

@@ -9,6 +9,7 @@ import (
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/cachedigest"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
@@ -448,8 +449,9 @@ func (c *cacheManager) getIDFromDeps(k *CacheKey) string {
}
func rootKey(dgst digest.Digest, output Index) digest.Digest {
out, _ := cachedigest.FromBytes(fmt.Appendf(nil, "%s@%d", dgst, output), cachedigest.TypeString)
if strings.HasPrefix(dgst.String(), "random:") {
return digest.Digest("random:" + digest.FromBytes(fmt.Appendf(nil, "%s@%d", dgst, output)).Encoded())
return digest.Digest("random:" + dgst.Encoded())
}
return digest.FromBytes(fmt.Appendf(nil, "%s@%d", dgst, output))
return out
}

View File

@@ -1226,8 +1226,8 @@ func adaptHistoryRecord(rec *controlapi.BuildHistoryRecord) filters.Adaptor {
return v, true
}
if context, ok := rec.FrontendAttrs["context"]; ok {
if ref, err := gitutil.ParseGitRef(context); err == nil {
return ref.Remote, true
if parsed, err := gitutil.ParseURL(context); err == nil {
return parsed.Remote, true
}
}
return "", false

View File

@@ -13,6 +13,7 @@ import (
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/solver/llbsolver/ops/opsutils"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/cachedigest"
"github.com/moby/buildkit/worker"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
@@ -51,8 +52,12 @@ func (b *BuildOp) CacheMap(ctx context.Context, g session.Group, index int) (*so
return nil, false, err
}
dgst, err := cachedigest.FromBytes(dt, cachedigest.TypeJSON)
if err != nil {
return nil, false, err
}
return &solver.CacheMap{
Digest: digest.FromBytes(dt),
Digest: dgst,
Deps: make([]struct {
Selector digest.Digest
ComputeDigestFunc solver.ResultBasedCacheFunc

View File

@@ -23,6 +23,7 @@ import (
"github.com/moby/buildkit/solver/llbsolver/mounts"
"github.com/moby/buildkit/solver/llbsolver/ops/opsutils"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/cachedigest"
"github.com/moby/buildkit/util/progress/logs"
utilsystem "github.com/moby/buildkit/util/system"
"github.com/moby/buildkit/worker"
@@ -173,8 +174,12 @@ func (e *ExecOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol
return nil, false, err
}
dgst, err := cachedigest.FromBytes(dt, cachedigest.TypeJSON)
if err != nil {
return nil, false, err
}
cm := &solver.CacheMap{
Digest: digest.FromBytes(dt),
Digest: dgst,
Deps: make([]struct {
Selector digest.Digest
ComputeDigestFunc solver.ResultBasedCacheFunc

View File

@@ -19,6 +19,7 @@ import (
"github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes"
"github.com/moby/buildkit/solver/llbsolver/ops/opsutils"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/cachedigest"
"github.com/moby/buildkit/util/flightcontrol"
"github.com/moby/buildkit/worker"
digest "github.com/opencontainers/go-digest"
@@ -134,8 +135,12 @@ func (f *fileOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol
return nil, false, err
}
dgst, err := cachedigest.FromBytes(dt, cachedigest.TypeJSON)
if err != nil {
return nil, false, err
}
cm := &solver.CacheMap{
Digest: digest.FromBytes(dt),
Digest: dgst,
Deps: make([]struct {
Selector digest.Digest
ComputeDigestFunc solver.ResultBasedCacheFunc
@@ -147,13 +152,17 @@ func (f *fileOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol
if _, ok := invalidSelectors[idx]; ok {
continue
}
dgsts := make([][]byte, 0, len(m))
paths := make([][]byte, 0, len(m))
for _, k := range m {
dgsts = append(dgsts, []byte(k.Path))
paths = append(paths, []byte(k.Path))
}
slices.SortFunc(dgsts, bytes.Compare)
slices.Reverse(dgsts) // historical reasons
cm.Deps[idx].Selector = digest.FromBytes(bytes.Join(dgsts, []byte{0}))
slices.SortFunc(paths, bytes.Compare)
slices.Reverse(paths) // historical reasons
dgst, err := cachedigest.FromBytes(bytes.Join(paths, []byte{0}), cachedigest.TypeStringList)
if err != nil {
return nil, false, err
}
cm.Deps[idx].Selector = dgst
cm.Deps[idx].ComputeDigestFunc = opsutils.NewContentHashFunc(dedupeSelectors(m))
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"github.com/moby/buildkit/util/cachedigest"
"github.com/moby/buildkit/worker"
"github.com/pkg/errors"
@@ -46,8 +47,12 @@ func (m *mergeOp) CacheMap(ctx context.Context, group session.Group, index int)
return nil, false, err
}
dgst, err := cachedigest.FromBytes(dt, cachedigest.TypeJSON)
if err != nil {
return nil, false, err
}
cm := &solver.CacheMap{
Digest: digest.FromBytes(dt),
Digest: dgst,
Deps: make([]struct {
Selector digest.Digest
ComputeDigestFunc solver.ResultBasedCacheFunc

View File

@@ -8,6 +8,7 @@ import (
"github.com/moby/buildkit/cache/contenthash"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/cachedigest"
"github.com/moby/buildkit/worker"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
@@ -66,6 +67,6 @@ func NewContentHashFunc(selectors []Selector) solver.ResultBasedCacheFunc {
return "", err
}
return digest.FromBytes(bytes.Join(dgsts, []byte{0})), nil
return cachedigest.FromBytes(bytes.Join(dgsts, []byte{0}), cachedigest.TypeDigestList)
}
}

View File

@@ -10,6 +10,7 @@ import (
"github.com/moby/buildkit/solver/llbsolver/ops/opsutils"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/source"
"github.com/moby/buildkit/util/cachedigest"
"github.com/moby/buildkit/worker"
digest "github.com/opencontainers/go-digest"
"golang.org/x/sync/semaphore"
@@ -88,7 +89,10 @@ func (s *SourceOp) CacheMap(ctx context.Context, g session.Group, index int) (*s
s.pin = pin
}
dgst := digest.FromBytes([]byte(sourceCacheType + ":" + k))
dgst, err := cachedigest.FromBytes([]byte(sourceCacheType+":"+k), cachedigest.TypeString)
if err != nil {
return nil, false, err
}
if strings.HasPrefix(k, "session:") {
dgst = digest.Digest("random:" + dgst.Encoded())
}

View File

@@ -489,7 +489,7 @@ func (p *ProvenanceCreator) Predicate(ctx context.Context) (any, error) {
}
if p.slsaVersion == provenancetypes.ProvenanceSLSA1 {
return provenancetypes.ConvertSLSA02ToSLSA1(p.pr), nil
return p.pr.ConvertToSLSA1(), nil
}
return p.pr, nil

View File

@@ -157,7 +157,7 @@ func NewPredicate(c *Capture) (*provenancetypes.ProvenancePredicateSLSA02, error
pr := &provenancetypes.ProvenancePredicateSLSA02{
Invocation: inv,
ProvenancePredicate: slsa02.ProvenancePredicate{
BuildType: provenancetypes.BuildKitBuildType,
BuildType: provenancetypes.BuildKitBuildType02,
Materials: materials,
},
Metadata: &provenancetypes.ProvenanceMetadataSLSA02{

View File

@@ -14,9 +14,20 @@ import (
)
const (
BuildKitBuildType = "https://mobyproject.org/buildkit@v1"
BuildKitBuildType1 = "https://github.com/moby/buildkit/blob/master/docs/attestations/slsa-definitions.md"
BuildKitBuildType02 = "https://mobyproject.org/buildkit@v1"
ProvenanceSLSA1 = ProvenanceSLSA("v1")
ProvenanceSLSA02 = ProvenanceSLSA("v0.2")
)
type ProvenanceSLSA string
var provenanceSLSAs = []ProvenanceSLSA{
ProvenanceSLSA1,
ProvenanceSLSA02,
}
type BuildConfig struct {
Definition []BuildStep `json:"llbDefinition,omitempty"`
DigestMapping map[digest.Digest]string `json:"digestMapping,omitempty"`
@@ -80,18 +91,6 @@ type Sources struct {
Local []LocalSource
}
const (
ProvenanceSLSA1 = ProvenanceSLSA("v1")
ProvenanceSLSA02 = ProvenanceSLSA("v0.2")
)
type ProvenanceSLSA string
var provenanceSLSAs = []ProvenanceSLSA{
ProvenanceSLSA1,
ProvenanceSLSA02,
}
func (ps *ProvenanceSLSA) Validate() error {
if *ps == "" {
return errors.New("provenance SLSA version cannot be empty")
@@ -188,16 +187,63 @@ type BuildKitComplete struct {
ResolvedDependencies bool `json:"resolvedDependencies"`
}
// ConvertSLSA02ToSLSA1 converts a SLSA 0.2 provenance predicate to a SLSA 1.0
// provenance predicate.
// FIXME: It should be the other way around when v1 is the default.
func ConvertSLSA02ToSLSA1(p02 *ProvenancePredicateSLSA02) *ProvenancePredicateSLSA1 {
if p02 == nil {
return nil
// ConvertToSLSA02 converts to a SLSA v0.2 provenance predicate.
func (p *ProvenancePredicateSLSA1) ConvertToSLSA02() *ProvenancePredicateSLSA02 {
var materials []slsa02.ProvenanceMaterial
for _, m := range p.BuildDefinition.ResolvedDependencies {
materials = append(materials, slsa02.ProvenanceMaterial{
URI: m.URI,
Digest: m.Digest,
})
}
var meta *ProvenanceMetadataSLSA02
if p.RunDetails.Metadata != nil {
meta = &ProvenanceMetadataSLSA02{
ProvenanceMetadata: slsa02.ProvenanceMetadata{
BuildInvocationID: p.RunDetails.Metadata.InvocationID,
BuildStartedOn: p.RunDetails.Metadata.StartedOn,
BuildFinishedOn: p.RunDetails.Metadata.FinishedOn,
Completeness: slsa02.ProvenanceComplete{
Parameters: p.RunDetails.Metadata.Completeness.Request,
Environment: true,
Materials: p.RunDetails.Metadata.Completeness.ResolvedDependencies,
},
Reproducible: p.RunDetails.Metadata.Reproducible,
},
BuildKitMetadata: p.RunDetails.Metadata.BuildKitMetadata,
Hermetic: p.RunDetails.Metadata.Hermetic,
}
}
return &ProvenancePredicateSLSA02{
ProvenancePredicate: slsa02.ProvenancePredicate{
Builder: slsa02.ProvenanceBuilder{
ID: p.RunDetails.Builder.ID,
},
BuildType: BuildKitBuildType02,
Materials: materials,
},
Invocation: ProvenanceInvocationSLSA02{
ConfigSource: slsa02.ConfigSource{
URI: p.BuildDefinition.ExternalParameters.ConfigSource.URI,
Digest: p.BuildDefinition.ExternalParameters.ConfigSource.Digest,
EntryPoint: p.BuildDefinition.ExternalParameters.ConfigSource.Path,
},
Parameters: p.BuildDefinition.ExternalParameters.Request,
Environment: Environment{
Platform: p.BuildDefinition.InternalParameters.BuilderPlatform,
},
},
BuildConfig: p.BuildDefinition.InternalParameters.BuildConfig,
Metadata: meta,
}
}
// ConvertToSLSA1 converts to a SLSA v1 provenance predicate.
func (p *ProvenancePredicateSLSA02) ConvertToSLSA1() *ProvenancePredicateSLSA1 {
var resolvedDeps []slsa1.ResourceDescriptor
for _, m := range p02.Materials {
for _, m := range p.Materials {
resolvedDeps = append(resolvedDeps, slsa1.ResourceDescriptor{
URI: m.URI,
Digest: m.Digest,
@@ -206,45 +252,45 @@ func ConvertSLSA02ToSLSA1(p02 *ProvenancePredicateSLSA02) *ProvenancePredicateSL
buildDef := ProvenanceBuildDefinitionSLSA1{
ProvenanceBuildDefinition: slsa1.ProvenanceBuildDefinition{
BuildType: "https://github.com/moby/buildkit/blob/master/docs/attestations/slsa-definitions.md",
BuildType: BuildKitBuildType1,
ResolvedDependencies: resolvedDeps,
},
ExternalParameters: ProvenanceExternalParametersSLSA1{
ConfigSource: ProvenanceConfigSourceSLSA1{
URI: p02.Invocation.ConfigSource.URI,
Digest: p02.Invocation.ConfigSource.Digest,
Path: p02.Invocation.ConfigSource.EntryPoint,
URI: p.Invocation.ConfigSource.URI,
Digest: p.Invocation.ConfigSource.Digest,
Path: p.Invocation.ConfigSource.EntryPoint,
},
Request: p02.Invocation.Parameters,
Request: p.Invocation.Parameters,
},
InternalParameters: ProvenanceInternalParametersSLSA1{
BuildConfig: p02.BuildConfig,
BuilderPlatform: p02.Invocation.Environment.Platform,
BuildConfig: p.BuildConfig,
BuilderPlatform: p.Invocation.Environment.Platform,
},
}
var meta *ProvenanceMetadataSLSA1
if p02.Metadata != nil {
if p.Metadata != nil {
meta = &ProvenanceMetadataSLSA1{
BuildMetadata: slsa1.BuildMetadata{
InvocationID: p02.Metadata.BuildInvocationID,
StartedOn: p02.Metadata.BuildStartedOn,
FinishedOn: p02.Metadata.BuildFinishedOn,
InvocationID: p.Metadata.BuildInvocationID,
StartedOn: p.Metadata.BuildStartedOn,
FinishedOn: p.Metadata.BuildFinishedOn,
},
BuildKitMetadata: p02.Metadata.BuildKitMetadata,
Hermetic: p02.Metadata.Hermetic,
BuildKitMetadata: p.Metadata.BuildKitMetadata,
Hermetic: p.Metadata.Hermetic,
Completeness: BuildKitComplete{
Request: p02.Metadata.Completeness.Parameters,
ResolvedDependencies: p02.Metadata.Completeness.Materials,
Request: p.Metadata.Completeness.Parameters,
ResolvedDependencies: p.Metadata.Completeness.Materials,
},
Reproducible: p02.Metadata.Reproducible,
Reproducible: p.Metadata.Reproducible,
}
}
runDetails := ProvenanceRunDetailsSLSA1{
ProvenanceRunDetails: slsa1.ProvenanceRunDetails{
Builder: slsa1.Builder{
ID: p02.Builder.ID,
ID: p.Builder.ID,
// TODO: handle builder components versions
// Version: map[string]string{
// "buildkit": version.Version,

View File

@@ -21,6 +21,7 @@ import (
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/util/cachedigest"
"github.com/moby/buildkit/util/estargz"
"github.com/moby/buildkit/util/flightcontrol"
"github.com/moby/buildkit/util/imageutil"
@@ -81,7 +82,7 @@ func mainManifestKey(desc ocispecs.Descriptor, platform ocispecs.Platform, layer
if err != nil {
return "", err
}
return digest.FromBytes(dt), nil
return cachedigest.FromBytes(dt, cachedigest.TypeJSON)
}
func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (cacheKey string, imgDigest string, cacheOpts solver.CacheOpts, cacheDone bool, err error) {
@@ -292,7 +293,7 @@ func cacheKeyFromConfig(dt []byte, layerLimit *int) (digest.Digest, error) {
if layerLimit != nil {
return "", errors.Wrap(err, "failed to parse image config")
}
return digest.FromBytes(dt), nil // digest of config
return cachedigest.FromBytes(dt, cachedigest.TypeJSON) // digest of config
}
if layerLimit != nil {
l := *layerLimit

View File

@@ -28,6 +28,7 @@ import (
"github.com/moby/buildkit/source"
srctypes "github.com/moby/buildkit/source/types"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/cachedigest"
"github.com/moby/buildkit/util/tracing"
"github.com/moby/buildkit/version"
digest "github.com/opencontainers/go-digest"
@@ -198,7 +199,10 @@ func (hs *httpSourceHandler) formatCacheKey(filename string, dgst digest.Digest,
if err != nil {
return dgst
}
return digest.FromBytes(dt)
if v, err := cachedigest.FromBytes(dt, cachedigest.TypeJSON); err == nil {
return v
}
return dgst
}
func (hs *httpSourceHandler) CacheKey(ctx context.Context, g session.Group, index int) (string, string, solver.CacheOpts, bool, error) {

View File

@@ -19,10 +19,10 @@ import (
"github.com/moby/buildkit/source"
srctypes "github.com/moby/buildkit/source/types"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/cachedigest"
"github.com/moby/buildkit/util/progress"
"github.com/moby/patternmatcher"
"github.com/moby/sys/user"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil"
fstypes "github.com/tonistiigi/fsutil/types"
@@ -154,7 +154,11 @@ func (ls *localSourceHandler) CacheKey(ctx context.Context, g session.Group, ind
if err != nil {
return "", "", nil, false, err
}
return "session:" + ls.src.Name + ":" + digest.FromBytes(dt).String(), digest.FromBytes(dt).String(), nil, true, nil
dgst, err := cachedigest.FromBytes(dt, cachedigest.TypeJSON)
if err != nil {
return "", "", nil, false, err
}
return "session:" + ls.src.Name + ":" + dgst.String(), dgst.String(), nil, true, nil
}
func (ls *localSourceHandler) Snapshot(ctx context.Context, g session.Group) (cache.ImmutableRef, error) {

168
vendor/github.com/moby/buildkit/util/cachedigest/db.go generated vendored Normal file
View File

@@ -0,0 +1,168 @@
package cachedigest
import (
"context"
"crypto/sha256"
"sync"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"go.etcd.io/bbolt"
)
// ErrInvalidEncoding is returned when a stored frame stream cannot be decoded.
var ErrInvalidEncoding = errors.Errorf("invalid encoding")

// ErrNotFound is returned when no record exists for a digest.
var ErrNotFound = errors.Errorf("not found")

// bucketName is the single bbolt bucket used; entries are keyed by digest string.
const bucketName = "byhash"

// DB persists the raw inputs ("frames") that produced cache digests, keyed
// by the resulting digest. A DB with no backing store (zero value) is valid
// and turns all persistence into a no-op.
type DB struct {
	db *bbolt.DB
	wg sync.WaitGroup // tracks in-flight asynchronous saveFrames writers
}

// defaultDB is the process-wide DB used by the package-level helpers.
var defaultDB = &DB{}

// SetDefaultDB replaces the process-wide default DB.
func SetDefaultDB(db *DB) {
	defaultDB = db
}

// GetDefaultDB returns the process-wide default DB.
func GetDefaultDB() *DB {
	return defaultDB
}
// NewDB opens (creating if necessary) the bbolt database at path.
func NewDB(path string) (*DB, error) {
	db, err := bbolt.Open(path, 0600, nil)
	if err != nil {
		return nil, err
	}
	return &DB{db: db}, nil
}

// Close waits for pending asynchronous writes to finish and then closes the
// underlying database. It is a no-op for a DB without a backing store.
func (d *DB) Close() error {
	if d.db != nil {
		d.wg.Wait()
		return d.db.Close()
	}
	return nil
}

// NewHash returns a sha256-backed Hash that records written data as frames
// in this DB when summed.
func (d *DB) NewHash(typ Type) *Hash {
	return &Hash{
		h:   sha256.New(),
		typ: typ,
		db:  d,
	}
}
// FromBytes computes the digest of dt and asynchronously records dt (tagged
// with typ) under that digest. The error result is currently always nil;
// persistence failures are dropped by saveFrames.
func (d *DB) FromBytes(dt []byte, typ Type) (digest.Digest, error) {
	dgst := digest.FromBytes(dt)
	d.saveFrames(dgst.String(), []Frame{
		{ID: FrameIDType, Data: []byte(string(typ))},
		{ID: FrameIDData, Data: dt},
	})
	return dgst, nil
}
// saveFrames persists the encoded frames under key in a background goroutine
// (tracked by d.wg so Close/Wait can drain it). Encoding and write errors are
// deliberately discarded: recording digest provenance is best-effort.
func (d *DB) saveFrames(key string, frames []Frame) {
	if d.db == nil {
		return // no backing store; persistence disabled
	}
	d.wg.Add(1)
	go func() {
		defer d.wg.Done()
		val, err := encodeFrames(frames)
		if err != nil {
			// Optionally log error
			return
		}
		_ = d.db.Update(func(tx *bbolt.Tx) error {
			b, err := tx.CreateBucketIfNotExists([]byte(bucketName))
			if err != nil {
				return err
			}
			return b.Put([]byte(key), val)
		})
	}()
}
// Get loads the recorded type and data/skip frames stored for the digest
// string dgst. It returns ErrNotFound when the DB has no backing store or
// holds no record for the digest, and an error when dgst is not a valid
// digest string. NOTE: ctx is currently unused.
func (d *DB) Get(ctx context.Context, dgst string) (Type, []Frame, error) {
	if d.db == nil {
		return "", nil, errors.WithStack(ErrNotFound)
	}
	parsed, err := digest.Parse(dgst)
	if err != nil {
		return "", nil, errors.Wrap(err, "invalid digest key")
	}
	var typ Type
	var resultFrames []Frame
	err = d.db.View(func(tx *bbolt.Tx) error {
		b := tx.Bucket([]byte(bucketName))
		if b == nil {
			return errors.WithStack(ErrNotFound)
		}
		val := b.Get([]byte(parsed.String()))
		if val == nil {
			return errors.WithStack(ErrNotFound)
		}
		frames, err := decodeFrames(val)
		if err != nil {
			return err
		}
		// Split the type frame off from the payload (data/skip) frames.
		for _, f := range frames {
			switch f.ID {
			case FrameIDType:
				typ = Type(f.Data)
			case FrameIDData, FrameIDSkip:
				resultFrames = append(resultFrames, f)
			}
		}
		return nil
	})
	if err != nil {
		return "", nil, err
	}
	return typ, resultFrames, nil
}
// All iterates over every stored record, invoking cb with the digest key,
// recorded type, and the data/skip frames. Iteration stops at the first
// error. NOTE: cancellation is only checked once, before iteration starts,
// not per entry.
func (d *DB) All(ctx context.Context, cb func(key string, typ Type, frames []Frame) error) error {
	if d.db == nil {
		return nil // no backing store: nothing to enumerate
	}
	return d.db.View(func(tx *bbolt.Tx) error {
		select {
		case <-ctx.Done():
			return context.Cause(ctx)
		default:
		}
		b := tx.Bucket([]byte(bucketName))
		if b == nil {
			return nil
		}
		return b.ForEach(func(k, v []byte) error {
			keyStr := string(k)
			_, err := digest.Parse(keyStr)
			if err != nil {
				return errors.Wrapf(err, "invalid digest key: %s", keyStr)
			}
			frames, err := decodeFrames(v)
			if err != nil {
				return err
			}
			// Separate the type frame from the payload frames.
			var typ Type
			var dataFrames []Frame
			for _, f := range frames {
				switch f.ID {
				case FrameIDType:
					typ = Type(f.Data)
				case FrameIDData, FrameIDSkip:
					dataFrames = append(dataFrames, f)
				}
			}
			return cb(keyStr, typ, dataFrames)
		})
	})
}
// Wait blocks until all in-flight asynchronous frame writes have completed.
func (d *DB) Wait() {
	d.wg.Wait()
}

View File

@@ -0,0 +1,164 @@
package cachedigest
import (
"bytes"
"encoding/binary"
"hash"
"regexp"
"sync"
"github.com/moby/buildkit/util/bklog"
digest "github.com/opencontainers/go-digest"
)
// Type describes the shape of the data that was hashed, so recorded frames
// can later be interpreted (see Record.LoadSubRecords).
type Type string

const (
	TypeJSON       Type = "json"
	TypeString     Type = "string"
	TypeStringList Type = "string-list"
	TypeDigestList Type = "digest-list"
	TypeFileList   Type = "file-list"
	TypeFile       Type = "file"
)

// String implements fmt.Stringer.
func (t Type) String() string {
	return string(t)
}

// NewHash returns a Hash backed by the package-level default DB.
func NewHash(typ Type) *Hash {
	return defaultDB.NewHash(typ)
}

// FromBytes hashes dt via the package-level default DB, recording dt as the
// resulting digest's payload.
func FromBytes(dt []byte, t Type) (digest.Digest, error) {
	return defaultDB.FromBytes(dt, t)
}
// Hash wraps a hash.Hash and additionally records the written bytes as
// frames, persisted to db when Sum is called.
type Hash struct {
	h      hash.Hash
	typ    Type
	db     *DB
	frames []Frame // recorded data/skip frames, in write order
}

// Reset clears the hash state and drops recorded frames (capacity is kept).
func (h *Hash) Reset() {
	h.h.Reset()
	h.frames = h.frames[:0]
}

// BlockSize returns the block size of the underlying hash.
func (h *Hash) BlockSize() int {
	return h.h.BlockSize()
}

// Size returns the digest size of the underlying hash.
func (h *Hash) Size() int {
	return h.h.Size()
}
// Write hashes p and records a copy of the written bytes as a data frame
// (cloned because callers may reuse p after Write returns).
func (h *Hash) Write(p []byte) (n int, err error) {
	n, err = h.h.Write(p)
	if n > 0 && h.db != nil {
		h.frames = append(h.frames, Frame{ID: FrameIDData, Data: bytes.Clone(p[:n])})
	}
	return n, err
}
// WriteNoDebug hashes p without recording its contents; only the byte count
// is tracked, as a skip frame holding a little-endian uint32. Consecutive
// no-debug writes are merged by bumping the previous skip frame's count.
func (h *Hash) WriteNoDebug(p []byte) (n int, err error) {
	n, err = h.h.Write(p)
	if n > 0 && h.db != nil {
		if len(h.frames) > 0 && h.frames[len(h.frames)-1].ID == FrameIDSkip {
			// Extend the previous skip frame in place.
			last := &h.frames[len(h.frames)-1]
			prevLen := binary.LittleEndian.Uint32(last.Data)
			binary.LittleEndian.PutUint32(last.Data, prevLen+uint32(n))
		} else {
			lenBytes := make([]byte, 4)
			binary.LittleEndian.PutUint32(lenBytes, uint32(n))
			h.frames = append(h.frames, Frame{ID: FrameIDSkip, Data: lenBytes})
		}
	}
	return n, err
}
// Sum returns the sha256 digest of everything written so far and, if any
// frames were recorded, asynchronously persists them (prefixed with a type
// frame) under the resulting digest.
func (h *Hash) Sum() digest.Digest {
	sum := digest.NewDigest(digest.SHA256, h.h)
	if h.db != nil && len(h.frames) > 0 {
		frames := []Frame{
			{ID: FrameIDType, Data: []byte(string(h.typ))},
		}
		frames = append(frames, h.frames...)
		h.db.saveFrames(sum.String(), frames)
	}
	return sum
}
// Record is the JSON-serializable form of a stored digest record, including
// any nested records discovered via LoadSubRecords.
type Record struct {
	Digest     digest.Digest `json:"digest"`
	Type       Type          `json:"type"`
	Data       []Frame       `json:"data,omitempty"`
	SubRecords []*Record     `json:"subRecords,omitempty"`
}

// shaRegexpOnce lazily compiles the pattern matching sha256 digest strings
// embedded in recorded data.
var shaRegexpOnce = sync.OnceValue(func() *regexp.Regexp {
	return regexp.MustCompile(`\bsha256:[a-f0-9]{64}\b`)
})
// LoadSubRecords scans the record's data frames for embedded sha256 digest
// references (how they are located depends on r.Type: regex match for
// strings, NUL-separated entries for digest/file lists), loads each
// referenced record via loader, and attaches the recursively expanded
// results as SubRecords. Unparseable or unloadable references are logged
// and skipped; only a recursive expansion failure is returned.
// Cleanup: dropped redundant digest.Digest conversions of values that are
// already digest.Digest.
func (r *Record) LoadSubRecords(loader func(d digest.Digest) (Type, []Frame, error)) error {
	var checksums []string
	var dt []byte
	// Concatenate the recorded (non-skip) payload bytes.
	for _, f := range r.Data {
		if f.ID != FrameIDData {
			continue
		}
		dt = append(dt, f.Data...)
	}
	switch r.Type {
	case TypeString:
		// find regex matches in the data
		matches := shaRegexpOnce().FindAllSubmatch(dt, -1)
		for _, match := range matches {
			if len(match) > 0 {
				checksums = append(checksums, string(match[0]))
			}
		}
	case TypeDigestList:
		// NUL-separated list of digest strings.
		for _, dgst := range bytes.Split(dt, []byte{0}) {
			checksums = append(checksums, string(dgst))
		}
	case TypeFileList:
		// NUL-separated "name...sha256:<hex>" pairs; keep the checksum suffix.
		for _, nameChecksumPair := range bytes.Split(dt, []byte{0}) {
			idx := bytes.LastIndex(nameChecksumPair, []byte("sha256:"))
			if idx < 0 {
				bklog.L.Warnf("invalid file list entry %q, missing sha256 prefix", nameChecksumPair)
				continue
			}
			checksums = append(checksums, string(nameChecksumPair[idx:]))
		}
	}
	dgsts := make([]digest.Digest, 0, len(checksums))
	for _, dgst := range checksums {
		if d, err := digest.Parse(dgst); err == nil {
			dgsts = append(dgsts, d)
		} else {
			bklog.L.Warnf("failed to parse debug info digest %q: %v", dgst, err)
		}
	}
	for _, dgst := range dgsts {
		typ, frames, err := loader(dgst)
		if err != nil {
			bklog.L.Warnf("failed to load sub-record for %s: %v", dgst, err)
			continue
		}
		rr := &Record{
			Digest: dgst,
			Type:   typ,
			Data:   frames,
		}
		if err := rr.LoadSubRecords(loader); err != nil {
			return err
		}
		r.SubRecords = append(r.SubRecords, rr)
	}
	return nil
}

View File

@@ -0,0 +1,68 @@
package cachedigest
import (
"encoding/binary"
"github.com/pkg/errors"
)
// FrameID identifies the kind of payload carried by a Frame.
type FrameID uint32

const (
	FrameIDType FrameID = 1 // frame data holds the record's Type string
	FrameIDData FrameID = 2 // frame data holds raw hashed bytes
	FrameIDSkip FrameID = 3 // frame data holds a little-endian uint32 count of hashed-but-unrecorded bytes
)
// String returns a human-readable name for the frame ID, or "unknown" for
// unrecognized values.
func (f FrameID) String() string {
	if f == FrameIDType {
		return "type"
	}
	if f == FrameIDData {
		return "data"
	}
	if f == FrameIDSkip {
		return "skip"
	}
	return "unknown"
}
// Frame is a single length-prefixed record in the encoded byte stream stored
// per digest (see encodeFrames for the wire layout).
type Frame struct {
	ID   FrameID `json:"type"`
	Data []byte  `json:"data,omitempty"`
}
// encodeFrames encodes a series of frames: [frameID:uint32][len:uint32][data:len],
// integers big-endian. The error result is currently always nil; it is kept
// for signature symmetry with decodeFrames.
// Perf: the output is sized once up front instead of allocating a temporary
// buffer per frame and growing the result repeatedly.
func encodeFrames(frames []Frame) ([]byte, error) {
	total := 0
	for _, f := range frames {
		total += 8 + len(f.Data)
	}
	out := make([]byte, 0, total)
	for _, f := range frames {
		out = binary.BigEndian.AppendUint32(out, uint32(f.ID))
		out = binary.BigEndian.AppendUint32(out, uint32(len(f.Data)))
		out = append(out, f.Data...)
	}
	return out, nil
}
// decodeFrames decodes a series of frames from data. It returns
// ErrInvalidEncoding when a declared frame length runs past the end of the
// input or trailing bytes remain after the last complete frame. Frame Data
// slices alias the input buffer.
func decodeFrames(data []byte) ([]Frame, error) {
	var frames []Frame
	rest := data
	for len(rest) >= 8 {
		id := binary.BigEndian.Uint32(rest[:4])
		size := int(binary.BigEndian.Uint32(rest[4:8]))
		if size > len(rest)-8 {
			return nil, errors.WithStack(ErrInvalidEncoding)
		}
		frames = append(frames, Frame{
			ID:   FrameID(id),
			Data: rest[8 : 8+size],
		})
		rest = rest[8+size:]
	}
	if len(rest) != 0 {
		return nil, errors.WithStack(ErrInvalidEncoding)
	}
	return frames, nil
}

View File

@@ -0,0 +1,94 @@
package errutil
import (
"encoding/json"
"errors"
"fmt"
"github.com/containerd/containerd/v2/core/remotes/docker"
remoteserrors "github.com/containerd/containerd/v2/core/remotes/errors"
)
const (
	maxPrintedBodySize = 256
)

// WithDetails inspects err for a registry "unexpected status" error and,
// when found, wraps it in a type whose Error() also surfaces the response
// body. Structured docker error lists are preferred; otherwise the raw
// body (truncated) is used. Other errors, and nil, are returned unchanged.
func WithDetails(err error) error {
	if err == nil {
		return nil
	}
	var statusErr remoteserrors.ErrUnexpectedStatus
	if !errors.As(err, &statusErr) {
		return err
	}
	// Prefer the structured error list the registry may have returned.
	var dockerErrs docker.Errors
	if jsonErr := json.Unmarshal(statusErr.Body, &dockerErrs); jsonErr == nil && len(dockerErrs) > 0 {
		return &formattedDockerError{dErr: dockerErrs}
	}
	return verboseUnexpectedStatusError{ErrUnexpectedStatus: statusErr}
}
// verboseUnexpectedStatusError wraps a remotes ErrUnexpectedStatus so that
// its message also includes details taken from the HTTP response body.
type verboseUnexpectedStatusError struct {
	remoteserrors.ErrUnexpectedStatus
}

// Unwrap returns the embedded status error so errors.As/errors.Is on the
// wrapped value still match remoteserrors.ErrUnexpectedStatus.
func (e verboseUnexpectedStatusError) Unwrap() error {
	return e.ErrUnexpectedStatus
}

// Error returns the underlying status error message, followed by the
// "details" field of the JSON response body when present, or otherwise by
// the raw body truncated to maxPrintedBodySize bytes.
func (e verboseUnexpectedStatusError) Error() string {
	if len(e.Body) == 0 {
		return e.ErrUnexpectedStatus.Error()
	}
	var details string
	var errDetails struct {
		Details string `json:"details"`
	}
	if err := json.Unmarshal(e.Body, &errDetails); err == nil && errDetails.Details != "" {
		details = errDetails.Details
	} else {
		// Body is not JSON with a details field: print it raw, capped so a
		// huge response cannot flood the error message.
		if len(e.Body) > maxPrintedBodySize {
			details = string(e.Body[:maxPrintedBodySize]) + fmt.Sprintf("... (%d bytes truncated)", len(e.Body)-maxPrintedBodySize)
		} else {
			details = string(e.Body)
		}
	}
	return fmt.Sprintf("%s: %s", e.ErrUnexpectedStatus.Error(), details)
}
// formattedDockerError renders a structured docker error list returned by a
// registry, appending each entry's string Detail (when present) to its message.
type formattedDockerError struct {
	dErr docker.Errors
}

// Error formats zero errors as "<nil>", a single error as its own message,
// and multiple errors as an "errors:" list, one per line.
func (e *formattedDockerError) Error() string {
	if len(e.dErr) == 0 {
		return "<nil>"
	}
	parts := make([]string, 0, len(e.dErr))
	for _, err := range e.dErr {
		out := err.Error()
		var single docker.Error
		if errors.As(err, &single) {
			if detail, ok := single.Detail.(string); ok && detail != "" {
				out += " - " + detail
			}
		}
		parts = append(parts, out)
	}
	if len(parts) == 1 {
		return parts[0]
	}
	msg := "errors:\n"
	for _, p := range parts {
		msg += p + "\n"
	}
	return msg
}

// Unwrap exposes the underlying docker.Errors for errors.As/errors.Is.
func (e *formattedDockerError) Unwrap() error {
	return e.dErr
}

View File

@@ -86,7 +86,7 @@ func ParseURL(remote string) (*GitURL, error) {
if err != nil {
return nil, err
}
return fromURL(url), nil
return FromURL(url), nil
}
if url, err := sshutil.ParseSCPStyleURL(remote); err == nil {
@@ -105,7 +105,7 @@ func IsGitTransport(remote string) bool {
return sshutil.IsImplicitSSHTransport(remote)
}
func fromURL(url *url.URL) *GitURL {
func FromURL(url *url.URL) *GitURL {
withoutOpts := *url
withoutOpts.Fragment = ""
return &GitURL{

View File

@@ -12,7 +12,6 @@ import (
"github.com/containerd/containerd/v2/core/remotes"
"github.com/containerd/containerd/v2/core/remotes/docker"
cerrdefs "github.com/containerd/errdefs"
"github.com/containerd/log"
"github.com/distribution/reference"
intoto "github.com/in-toto/in-toto-golang/in_toto"
"github.com/moby/buildkit/session"
@@ -152,7 +151,7 @@ func Push(ctx context.Context, sm *session.Manager, sid string, provider content
func skipNonDistributableBlobs(f images.HandlerFunc) images.HandlerFunc {
return func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) {
if images.IsNonDistributable(desc.MediaType) {
log.G(ctx).WithField("digest", desc.Digest).WithField("mediatype", desc.MediaType).Debug("Skipping non-distributable blob")
bklog.G(ctx).WithField("digest", desc.Digest).WithField("mediatype", desc.MediaType).Debug("Skipping non-distributable blob")
return nil, images.ErrSkipDesc
}
return f(ctx, desc)

View File

@@ -0,0 +1,459 @@
// Package resolvconf is used to generate a container's /etc/resolv.conf file.
//
// Constructor Load and Parse read a resolv.conf file from the filesystem or
// a reader respectively, and return a ResolvConf object.
//
// The ResolvConf object can then be updated with overrides for nameserver,
// search domains, and DNS options.
//
// ResolvConf can then be transformed to make it suitable for legacy networking,
// a network with an internal nameserver, or used as-is for host networking.
//
// This package includes methods to write the file for the container, along with
// a hash that can be used to detect modifications made by the user to avoid
// overwriting those updates.
package resolvconf
import (
"bufio"
"bytes"
"context"
"io"
"net/netip"
"os"
"slices"
"strconv"
"strings"
"github.com/moby/buildkit/errdefs"
"github.com/moby/buildkit/util/bklog"
)
// Fallback nameservers, to use if none can be obtained from the host or command
// line options.
var (
	// IPv4 fallbacks; always included by defaultNSAddrs.
	defaultIPv4NSs = []netip.Addr{
		netip.MustParseAddr("8.8.8.8"),
		netip.MustParseAddr("8.8.4.4"),
	}
	// IPv6 fallbacks; appended by defaultNSAddrs only when IPv6 is enabled.
	defaultIPv6NSs = []netip.Addr{
		netip.MustParseAddr("2001:4860:4860::8888"),
		netip.MustParseAddr("2001:4860:4860::8844"),
	}
)
// ResolvConf represents a resolv.conf file. It can be constructed by
// reading a resolv.conf file, using method Parse().
type ResolvConf struct {
	nameServers []netip.Addr // addresses from 'nameserver' directives, or an override
	search      []string     // domains from the last 'search'/'domain' directive, or an override
	options     []string     // accumulated 'options' values
	other       []string     // Unrecognised directives from the host's file, if any.
	md          metadata     // provenance info, emitted as comments by Generate
}
// ExtDNSEntry represents a nameserver address that was removed from the
// container's resolv.conf when it was transformed by TransformForIntNS(). These
// are addresses read from the host's file, or applied via an override ('--dns').
type ExtDNSEntry struct {
Addr netip.Addr
HostLoopback bool // The address is loopback, in the host's namespace.
}
func (ed ExtDNSEntry) String() string {
if ed.HostLoopback {
return "host(" + ed.Addr.String() + ")"
}
return ed.Addr.String()
}
// metadata is used to track where components of the generated file have come
// from, in order to generate comments in the file for debug/info. Struct members
// are exported for use by 'text/template'.
type metadata struct {
	SourcePath      string        // host file this config was parsed from (see Parse)
	Header          string        // verbatim header set via SetHeader
	NSOverride      bool          // nameservers were replaced via OverrideNameServers
	SearchOverride  bool          // search domains were replaced via OverrideSearch
	OptionsOverride bool          // options were replaced via OverrideOptions
	NDotsFrom       string        // origin of the ndots option: "host", "override", or "internal"
	Transform       string        // which Transform* method was applied, if any
	InvalidNSs      []string      // nameserver values that failed to parse
	ExtNameServers  []ExtDNSEntry // nameservers set aside by TransformForIntNS
	Warnings        []string      // notes written into the generated file's trailer
}
// Load opens a file at path and parses it as a resolv.conf file.
// On error, the returned ResolvConf will be zero-valued.
func Load(path string) (ResolvConf, error) {
	file, err := os.Open(path)
	if err != nil {
		return ResolvConf{}, err
	}
	defer file.Close()
	return Parse(file, path)
}
// Parse parses a resolv.conf file from reader.
// path is optional if reader is an *os.File.
// On error, the returned ResolvConf will be zero-valued.
func Parse(reader io.Reader, path string) (ResolvConf, error) {
	var rc ResolvConf
	rc.md.SourcePath = path
	// When no path is given, fall back to the reader's own name (e.g. an
	// *os.File) for the provenance comment in the generated file.
	if namer, ok := reader.(interface{ Name() string }); ok && path == "" {
		rc.md.SourcePath = namer.Name()
	}
	sc := bufio.NewScanner(reader)
	for sc.Scan() {
		rc.processLine(sc.Text())
	}
	if err := sc.Err(); err != nil {
		return ResolvConf{}, errdefs.Internal(err)
	}
	// Remember that an ndots option came from the host's file.
	if _, found := rc.Option("ndots"); found {
		rc.md.NDotsFrom = "host"
	}
	return rc, nil
}
// SetHeader sets the content to be included verbatim at the top of the
// generated resolv.conf file. No formatting or checking is done on the
// string. It must be valid resolv.conf syntax. (Comments must have '#'
// or ';' in the first column of each line).
//
// For example:
//
//	SetHeader("# My resolv.conf\n# This file was generated.")
func (rc *ResolvConf) SetHeader(c string) {
	rc.md.Header = c
}

// NameServers returns addresses used in nameserver directives.
// The result is a copy; modifying it does not affect rc.
func (rc *ResolvConf) NameServers() []netip.Addr {
	return slices.Clone(rc.nameServers)
}

// OverrideNameServers replaces the current set of nameservers.
func (rc *ResolvConf) OverrideNameServers(nameServers []netip.Addr) {
	rc.nameServers = nameServers
	rc.md.NSOverride = true
}

// Search returns the current DNS search domains.
// The result is a copy; modifying it does not affect rc.
func (rc *ResolvConf) Search() []string {
	return slices.Clone(rc.search)
}

// OverrideSearch replaces the current DNS search domains.
// Entries equal to "." are dropped.
func (rc *ResolvConf) OverrideSearch(search []string) {
	var filtered []string
	for _, s := range search {
		if s != "." {
			filtered = append(filtered, s)
		}
	}
	rc.search = filtered
	rc.md.SearchOverride = true
}

// Options returns the current options.
// The result is a copy; modifying it does not affect rc.
func (rc *ResolvConf) Options() []string {
	return slices.Clone(rc.options)
}
// Option finds the last option named search, and returns (value, true) if
// found, else ("", false). Options are treated as "name:value", where the
// ":value" may be omitted.
//
// For example, for "ndots:1 edns0":
//
//	Option("ndots") -> ("1", true)
//	Option("edns0") -> ("", true)
func (rc *ResolvConf) Option(search string) (string, bool) {
	// Scan from the end: the last occurrence of an option wins.
	for i := len(rc.options); i > 0; i-- {
		name, value, _ := strings.Cut(rc.options[i-1], ":")
		if name == search {
			return value, true
		}
	}
	return "", false
}
// OverrideOptions replaces the current DNS options.
func (rc *ResolvConf) OverrideOptions(options []string) {
	rc.options = slices.Clone(options)
	// Reset ndots provenance; it now comes from the override, if present at all.
	rc.md.NDotsFrom = ""
	if _, exists := rc.Option("ndots"); exists {
		rc.md.NDotsFrom = "override"
	}
	rc.md.OptionsOverride = true
}

// AddOption adds a single DNS option.
func (rc *ResolvConf) AddOption(option string) {
	// Note: len > 6 means only "ndots:<value>" with a non-empty value marks
	// the ndots provenance; a bare "ndots:" is appended without marking it.
	if len(option) > 6 && option[:6] == "ndots:" {
		rc.md.NDotsFrom = "internal"
	}
	rc.options = append(rc.options, option)
}
// TransformForLegacyNw makes sure the resolv.conf file will be suitable for
// use in a legacy network (one that has no internal resolver).
//   - Remove loopback addresses inherited from the host's resolv.conf, because
//     they'll only work in the host's namespace.
//   - Remove IPv6 addresses if !ipv6.
//   - Add default nameservers if there are no addresses left.
func (rc *ResolvConf) TransformForLegacyNw(ipv6 bool) {
	rc.md.Transform = "legacy"
	if rc.md.NSOverride {
		// Overridden nameservers are kept as-is.
		return
	}
	var kept []netip.Addr
	for _, ns := range rc.nameServers {
		if ns.IsLoopback() {
			// Only reachable inside the host's network namespace.
			continue
		}
		if ns.Is6() && !ipv6 {
			continue
		}
		kept = append(kept, ns)
	}
	rc.nameServers = kept
	if len(kept) > 0 {
		return
	}
	bklog.G(context.TODO()).Info("No non-localhost DNS nameservers are left in resolv.conf. Using default external servers")
	rc.nameServers = defaultNSAddrs(ipv6)
	rc.md.Warnings = append(rc.md.Warnings, "Used default nameservers.")
}
// TransformForIntNS makes sure the resolv.conf file will be suitable for
// use in a network sandbox that has an internal DNS resolver.
//   - Add internalNS as a nameserver.
//   - Remove other nameservers, stashing them as ExtNameServers for the
//     internal resolver to use.
//   - Mark ExtNameServers that must be accessed from the host namespace.
//   - If no ExtNameServer addresses are found, use the defaults.
//   - Ensure there's an 'options' value for each entry in reqdOptions. If the
//     option includes a ':', and an option with a matching prefix exists, it
//     is not modified.
//
// The returned slice is a copy of the stashed external entries. The error
// result is always nil.
func (rc *ResolvConf) TransformForIntNS(
	internalNS netip.Addr,
	reqdOptions []string,
) ([]ExtDNSEntry, error) {
	// Add each of the nameservers read from the host's /etc/hosts or supplied as an
	// override to ExtNameServers, for the internal resolver to talk to. Addresses
	// read from host config should be accessed from the host's network namespace
	// (HostLoopback=true). Addresses supplied as overrides are accessed from the
	// container's namespace.
	rc.md.ExtNameServers = nil
	for _, addr := range rc.nameServers {
		rc.md.ExtNameServers = append(rc.md.ExtNameServers, ExtDNSEntry{
			Addr:         addr,
			HostLoopback: !rc.md.NSOverride,
		})
	}

	// The transformed config only lists the internal nameserver.
	rc.nameServers = []netip.Addr{internalNS}

	// For each option required by the nameserver, add it if not already present. If
	// the option is already present, don't override it. Apart from ndots - if the
	// ndots value is invalid and an ndots option is required, replace the existing
	// value.
	for _, opt := range reqdOptions {
		optName, _, _ := strings.Cut(opt, ":")
		if optName == "ndots" {
			rc.options = removeInvalidNDots(rc.options)
			// No need to update rc.md.NDotsFrom, if there is no ndots option remaining,
			// it'll be set to "internal" when the required value is added.
		}
		if _, exists := rc.Option(optName); !exists {
			rc.AddOption(opt)
		}
	}

	rc.md.Transform = "internal resolver"
	if len(rc.md.ExtNameServers) == 0 {
		rc.md.Warnings = append(rc.md.Warnings, "NO EXTERNAL NAMESERVERS DEFINED")
	}
	return slices.Clone(rc.md.ExtNameServers), nil
}
// Generate returns content suitable for writing to a resolv.conf file. If comments
// is true, the file will include header information if supplied, and a trailing
// comment that describes how the file was constructed and lists external resolvers.
// The error result is always nil.
func (rc *ResolvConf) Generate(comments bool) ([]byte, error) {
	var b bytes.Buffer
	b.Grow(512) // estimated size for a regular resolv.conf we produce.
	if comments && rc.md.Header != "" {
		b.WriteString(rc.md.Header + "\n")
		b.WriteByte('\n')
	}
	// Directives, in order: nameserver, search, options, then any
	// unrecognised lines copied through from the source file.
	for _, ns := range rc.nameServers {
		b.WriteString("nameserver ")
		b.WriteString(ns.String())
		b.WriteByte('\n')
	}
	if len(rc.search) > 0 {
		b.WriteString("search ")
		for i, s := range rc.search {
			if i > 0 {
				b.WriteByte(' ')
			}
			b.WriteString(s)
		}
		b.WriteByte('\n')
	}
	if len(rc.options) > 0 {
		b.WriteString("options ")
		for i, s := range rc.options {
			if i > 0 {
				b.WriteByte(' ')
			}
			b.WriteString(s)
		}
		b.WriteByte('\n')
	}
	for _, o := range rc.other {
		b.WriteString(o)
		b.WriteByte('\n')
	}
	// Trailing comment block describing provenance: source path, transform,
	// warnings, stashed external servers, invalid entries, and overrides.
	if comments {
		b.WriteByte('\n')
		b.WriteString("# Based on host file: '" + rc.md.SourcePath + "'")
		if rc.md.Transform != "" {
			b.WriteString(" (" + rc.md.Transform + ")")
		}
		b.WriteByte('\n')
		for _, w := range rc.md.Warnings {
			b.WriteString("# ")
			b.WriteString(w)
			b.WriteByte('\n')
		}
		if len(rc.md.ExtNameServers) > 0 {
			b.WriteString("# ExtServers: [")
			for i, ext := range rc.md.ExtNameServers {
				if i > 0 {
					b.WriteByte(' ')
				}
				b.WriteString(ext.String())
			}
			b.WriteByte(']')
			b.WriteByte('\n')
		}
		if len(rc.md.InvalidNSs) > 0 {
			b.WriteString("# Invalid nameservers: [")
			for i, ext := range rc.md.InvalidNSs {
				if i > 0 {
					b.WriteByte(' ')
				}
				b.WriteString(ext)
			}
			b.WriteByte(']')
			b.WriteByte('\n')
		}
		// List which parts of the config were overridden by the caller.
		b.WriteString("# Overrides: [")
		var overrides int
		if rc.md.NSOverride {
			b.WriteString("nameservers")
			overrides++
		}
		if rc.md.SearchOverride {
			if overrides > 0 {
				b.WriteByte(' ')
			}
			b.WriteString("search")
			overrides++
		}
		if rc.md.OptionsOverride {
			if overrides > 0 {
				b.WriteByte(' ')
			}
			b.WriteString("options")
		}
		b.WriteByte(']')
		b.WriteByte('\n')
		if rc.md.NDotsFrom != "" {
			b.WriteString("# Option ndots from: " + rc.md.NDotsFrom + "\n")
		}
	}
	return b.Bytes(), nil
}
// processLine parses a single resolv.conf line, updating rc. Blank lines
// and comments are ignored; unrecognised directives are preserved verbatim.
func (rc *ResolvConf) processLine(line string) {
	// Strip blank lines and comments.
	if line == "" || line[0] == '#' || line[0] == ';' {
		return
	}
	fields := strings.Fields(line)
	if len(fields) == 0 {
		return
	}
	directive, args := fields[0], fields[1:]
	switch directive {
	case "nameserver":
		if len(args) == 0 {
			return
		}
		addr, err := netip.ParseAddr(args[0])
		if err != nil {
			// Keep the bad value for the generated file's comments.
			rc.md.InvalidNSs = append(rc.md.InvalidNSs, args[0])
			return
		}
		rc.nameServers = append(rc.nameServers, addr)
	case "domain", "search":
		// 'domain' is an obsolete name for 'search'.
		if len(args) == 0 {
			return
		}
		// Only the last 'search' directive is used.
		rc.search = args
	case "options":
		if len(args) == 0 {
			return
		}
		// Accumulate options.
		rc.options = append(rc.options, args...)
	default:
		// Copy anything that's not a recognised directive.
		rc.other = append(rc.other, line)
	}
}
// defaultNSAddrs returns a fresh slice of fallback nameserver addresses:
// the IPv4 defaults, plus the IPv6 defaults when ipv6 is true.
func defaultNSAddrs(ipv6 bool) []netip.Addr {
	if !ipv6 {
		return append([]netip.Addr(nil), defaultIPv4NSs...)
	}
	addrs := make([]netip.Addr, 0, len(defaultIPv4NSs)+len(defaultIPv6NSs))
	addrs = append(addrs, defaultIPv4NSs...)
	return append(addrs, defaultIPv6NSs...)
}
// removeInvalidNDots drops ill-formed "ndots" settings from options: a bare
// "ndots", an empty "ndots:", and any value that is not a non-negative
// integer. Entries are compacted in place, reusing the backing array, and
// the obsolete tail is zeroed for the garbage collector.
func removeInvalidNDots(options []string) []string {
	kept := options[:0]
	for _, opt := range options {
		if name, value, hasSep := strings.Cut(opt, ":"); name == "ndots" {
			if !hasSep || value == "" {
				continue
			}
			if ndots, err := strconv.Atoi(value); err != nil || ndots < 0 {
				continue
			}
		}
		kept = append(kept, opt)
	}
	clear(options[len(kept):])
	return kept
}

View File

@@ -17,7 +17,8 @@ import (
cerrdefs "github.com/containerd/errdefs"
"github.com/moby/buildkit/session"
sessionauth "github.com/moby/buildkit/session/auth"
log "github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/errutil"
"github.com/moby/buildkit/util/flightcontrol"
"github.com/moby/buildkit/version"
"github.com/pkg/errors"
@@ -367,7 +368,7 @@ func (ah *authHandler) fetchToken(ctx context.Context, sm *session.Manager, g se
// fetch token for the resource scope
if to.Secret != "" {
defer func() {
err = errors.Wrap(err, "failed to fetch oauth token")
err = errors.Wrap(errutil.WithDetails(err), "failed to fetch oauth token")
}()
// try GET first because Docker Hub does not support POST
// switch once support has landed
@@ -390,7 +391,7 @@ func (ah *authHandler) fetchToken(ctx context.Context, sm *session.Manager, g se
token = resp.AccessToken
return nil, nil
}
log.G(ctx).WithFields(logrus.Fields{
bklog.G(ctx).WithFields(logrus.Fields{
"status": errStatus.Status,
"body": string(errStatus.Body),
}).Debugf("token request failed")

View File

@@ -2,35 +2,29 @@ package tracing
import (
"context"
stderrors "errors"
"github.com/hashicorp/go-multierror"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
)
type MultiSpanExporter []sdktrace.SpanExporter
func (m MultiSpanExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) (err error) {
func (m MultiSpanExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
var errs []error
for _, exp := range m {
if e := exp.ExportSpans(ctx, spans); e != nil {
if err != nil {
err = multierror.Append(err, e)
continue
}
err = e
errs = append(errs, e)
}
}
return err
return stderrors.Join(errs...)
}
func (m MultiSpanExporter) Shutdown(ctx context.Context) (err error) {
func (m MultiSpanExporter) Shutdown(ctx context.Context) error {
var errs []error
for _, exp := range m {
if e := exp.Shutdown(ctx); e != nil {
if err != nil {
err = multierror.Append(err, e)
continue
}
err = e
errs = append(errs, e)
}
}
return err
return stderrors.Join(errs...)
}

View File

@@ -16,7 +16,7 @@ import (
"github.com/containerd/containerd/v2/pkg/archive/compression"
"github.com/containerd/containerd/v2/pkg/labels"
cerrdefs "github.com/containerd/errdefs"
log "github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/bklog"
digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
@@ -91,7 +91,7 @@ func (s *winDiffer) Compare(ctx context.Context, lower, upper []mount.Mount, opt
cw.Close()
if newReference {
if err := s.store.Abort(ctx, config.Reference); err != nil {
log.G(ctx).WithField("ref", config.Reference).Warnf("failed to delete diff upload")
bklog.G(ctx).WithField("ref", config.Reference).Warnf("failed to delete diff upload")
}
}
}
@@ -258,7 +258,7 @@ func makeWindowsLayer(ctx context.Context, w io.Writer) (io.Writer, func(error),
return tarWriter.Close()
}()
if err != nil {
log.G(ctx).Errorf("makeWindowsLayer %+v", err)
bklog.G(ctx).Errorf("makeWindowsLayer %+v", err)
}
pw.CloseWithError(err)
done <- err

View File

@@ -2,6 +2,7 @@ package base
import (
"context"
stderrors "errors"
"fmt"
"os"
"path/filepath"
@@ -13,7 +14,6 @@ import (
"github.com/containerd/containerd/v2/core/remotes/docker"
"github.com/containerd/containerd/v2/pkg/gc"
"github.com/containerd/platforms"
"github.com/hashicorp/go-multierror"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
@@ -222,21 +222,21 @@ func (w *Worker) GarbageCollect(ctx context.Context) error {
}
func (w *Worker) Close() error {
var rerr error
var errs []error
if err := w.MetadataStore.Close(); err != nil {
rerr = multierror.Append(rerr, err)
errs = append(errs, err)
}
for _, provider := range w.NetworkProviders {
if err := provider.Close(); err != nil {
rerr = multierror.Append(rerr, err)
errs = append(errs, err)
}
}
if w.ResourceMonitor != nil {
if err := w.ResourceMonitor.Close(); err != nil {
rerr = multierror.Append(rerr, err)
errs = append(errs, err)
}
}
return rerr
return stderrors.Join(errs...)
}
func (w *Worker) ContentStore() *containerdsnapshot.Store {

View File

@@ -1,8 +1,9 @@
package worker
import (
stderrors "errors"
"github.com/containerd/containerd/v2/pkg/filters"
"github.com/hashicorp/go-multierror"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/client"
"github.com/pkg/errors"
@@ -16,13 +17,13 @@ type Controller struct {
}
func (c *Controller) Close() error {
var rerr error
var errs []error
for _, w := range c.workers {
if err := w.Close(); err != nil {
rerr = multierror.Append(rerr, err)
errs = append(errs, err)
}
}
return rerr
return stderrors.Join(errs...)
}
// Add adds a local worker.

6
vendor/modules.txt vendored
View File

@@ -757,7 +757,7 @@ github.com/mitchellh/hashstructure/v2
# github.com/mitchellh/reflectwalk v1.0.2
## explicit
github.com/mitchellh/reflectwalk
# github.com/moby/buildkit v0.23.2
# github.com/moby/buildkit v0.24.0-rc1
## explicit; go 1.23.0
github.com/moby/buildkit/api/services/control
github.com/moby/buildkit/api/types
@@ -804,6 +804,7 @@ github.com/moby/buildkit/frontend/attestations
github.com/moby/buildkit/frontend/attestations/sbom
github.com/moby/buildkit/frontend/dockerfile/builder
github.com/moby/buildkit/frontend/dockerfile/command
github.com/moby/buildkit/frontend/dockerfile/dfgitutil
github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb
github.com/moby/buildkit/frontend/dockerfile/instructions
github.com/moby/buildkit/frontend/dockerfile/linter
@@ -864,6 +865,7 @@ github.com/moby/buildkit/util/appdefaults
github.com/moby/buildkit/util/archutil
github.com/moby/buildkit/util/attestation
github.com/moby/buildkit/util/bklog
github.com/moby/buildkit/util/cachedigest
github.com/moby/buildkit/util/compression
github.com/moby/buildkit/util/cond
github.com/moby/buildkit/util/contentutil
@@ -874,6 +876,7 @@ github.com/moby/buildkit/util/db/boltutil
github.com/moby/buildkit/util/disk
github.com/moby/buildkit/util/entitlements
github.com/moby/buildkit/util/entitlements/security
github.com/moby/buildkit/util/errutil
github.com/moby/buildkit/util/estargz
github.com/moby/buildkit/util/flightcontrol
github.com/moby/buildkit/util/gitutil
@@ -894,6 +897,7 @@ github.com/moby/buildkit/util/pull
github.com/moby/buildkit/util/pull/pullprogress
github.com/moby/buildkit/util/purl
github.com/moby/buildkit/util/push
github.com/moby/buildkit/util/resolvconf
github.com/moby/buildkit/util/resolver
github.com/moby/buildkit/util/resolver/config
github.com/moby/buildkit/util/resolver/limited