Merge pull request #51412 from austinvazquez/use-regular-slice-for-disk-usage

api/types: use regular slices for disk usage types
This commit is contained in:
Paweł Gronowski
2025-11-06 23:13:47 +01:00
committed by GitHub
22 changed files with 364 additions and 236 deletions

View File

@@ -2096,7 +2096,6 @@ definitions:
type: "array"
x-omitempty: true
items:
x-nullable: true
x-go-type:
type: Summary
@@ -2279,7 +2278,6 @@ definitions:
type: "array"
x-omitempty: true
items:
x-nullable: true
x-go-type:
type: Volume
@@ -2893,7 +2891,6 @@ definitions:
type: "array"
x-omitempty: true
items:
x-nullable: true
x-go-type:
type: CacheRecord
@@ -5617,7 +5614,6 @@ definitions:
type: "array"
x-omitempty: true
items:
x-nullable: true
x-go-type:
type: Summary

View File

@@ -2096,7 +2096,6 @@ definitions:
type: "array"
x-omitempty: true
items:
x-nullable: true
x-go-type:
type: Summary
@@ -2279,7 +2278,6 @@ definitions:
type: "array"
x-omitempty: true
items:
x-nullable: true
x-go-type:
type: Volume
@@ -2893,7 +2891,6 @@ definitions:
type: "array"
x-omitempty: true
items:
x-nullable: true
x-go-type:
type: CacheRecord
@@ -5617,7 +5614,6 @@ definitions:
type: "array"
x-omitempty: true
items:
x-nullable: true
x-go-type:
type: Summary

View File

@@ -17,7 +17,7 @@ type DiskUsage struct {
// List of build cache records.
//
Items []*CacheRecord `json:"Items,omitempty"`
Items []CacheRecord `json:"Items,omitempty"`
// Disk space that can be reclaimed by removing inactive build cache records.
//

View File

@@ -17,7 +17,7 @@ type DiskUsage struct {
// List of container summaries.
//
Items []*Summary `json:"Items,omitempty"`
Items []Summary `json:"Items,omitempty"`
// Disk space that can be reclaimed by removing inactive containers.
//

View File

@@ -17,7 +17,7 @@ type DiskUsage struct {
// List of image summaries.
//
Items []*Summary `json:"Items,omitempty"`
Items []Summary `json:"Items,omitempty"`
// Disk space that can be reclaimed by removing unused images.
//

View File

@@ -37,14 +37,14 @@ type LegacyDiskUsage struct {
LayersSize int64 `json:"LayersSize,omitempty"`
// Deprecated: kept to maintain backwards compatibility with API < v1.52, use [ImagesDiskUsage.Items] instead.
Images []*image.Summary `json:"Images,omitempty"`
Images []image.Summary `json:"Images,omitzero"`
// Deprecated: kept to maintain backwards compatibility with API < v1.52, use [ContainersDiskUsage.Items] instead.
Containers []*container.Summary `json:"Containers,omitempty"`
Containers []container.Summary `json:"Containers,omitzero"`
// Deprecated: kept to maintain backwards compatibility with API < v1.52, use [VolumesDiskUsage.Items] instead.
Volumes []*volume.Volume `json:"Volumes,omitempty"`
Volumes []volume.Volume `json:"Volumes,omitzero"`
// Deprecated: kept to maintain backwards compatibility with API < v1.52, use [BuildCacheDiskUsage.Items] instead.
BuildCache []*build.CacheRecord `json:"BuildCache,omitempty"`
BuildCache []build.CacheRecord `json:"BuildCache,omitzero"`
}

View File

@@ -17,7 +17,7 @@ type DiskUsage struct {
// List of volumes.
//
Items []*Volume `json:"Items,omitempty"`
Items []Volume `json:"Items,omitempty"`
// Disk space that can be reclaimed by removing inactive volumes.
//

View File

@@ -5,12 +5,15 @@ import (
"encoding/json"
"fmt"
"net/url"
"slices"
"strings"
"github.com/moby/moby/api/types/build"
"github.com/moby/moby/api/types/container"
"github.com/moby/moby/api/types/image"
"github.com/moby/moby/api/types/system"
"github.com/moby/moby/api/types/volume"
"github.com/moby/moby/client/pkg/versions"
)
// DiskUsageOptions holds parameters for [Client.DiskUsage] operations.
@@ -151,103 +154,166 @@ func (cli *Client) DiskUsage(ctx context.Context, options DiskUsageOptions) (Dis
return DiskUsageResult{}, fmt.Errorf("Error retrieving disk usage: %v", err)
}
var (
r DiskUsageResult
imagesFrom = []*image.Summary{}
containersFrom = []*container.Summary{}
volumesFrom = []*volume.Volume{}
buildCacheFrom = []*build.CacheRecord{}
)
// Generate result from a legacy response.
if versions.LessThan(cli.version, "1.52") {
return diskUsageResultFromLegacyAPI(&du), nil
}
if du.ImageUsage != nil {
var r DiskUsageResult
if idu := du.ImageUsage; idu != nil {
r.Images = ImagesDiskUsage{
ActiveImages: du.ImageUsage.ActiveImages,
Reclaimable: du.ImageUsage.Reclaimable,
TotalImages: du.ImageUsage.TotalImages,
TotalSize: du.ImageUsage.TotalSize,
ActiveImages: idu.ActiveImages,
Reclaimable: idu.Reclaimable,
TotalImages: idu.TotalImages,
TotalSize: idu.TotalSize,
}
if options.Verbose {
imagesFrom = du.ImageUsage.Items
}
} else {
// Fallback for legacy response.
r.Images = ImagesDiskUsage{
TotalSize: du.LayersSize,
}
if du.Images != nil && options.Verbose {
imagesFrom = du.Images
r.Images.Items = slices.Clone(idu.Items)
}
}
r.Images.Items = make([]image.Summary, len(imagesFrom))
for i, ii := range imagesFrom {
r.Images.Items[i] = *ii
}
if du.ContainerUsage != nil {
if cdu := du.ContainerUsage; cdu != nil {
r.Containers = ContainersDiskUsage{
ActiveContainers: du.ContainerUsage.ActiveContainers,
Reclaimable: du.ContainerUsage.Reclaimable,
TotalContainers: du.ContainerUsage.TotalContainers,
TotalSize: du.ContainerUsage.TotalSize,
ActiveContainers: cdu.ActiveContainers,
Reclaimable: cdu.Reclaimable,
TotalContainers: cdu.TotalContainers,
TotalSize: cdu.TotalSize,
}
if options.Verbose {
containersFrom = du.ContainerUsage.Items
r.Containers.Items = slices.Clone(cdu.Items)
}
} else if du.Containers != nil && options.Verbose {
// Fallback for legacy response.
containersFrom = du.Containers
}
r.Containers.Items = make([]container.Summary, len(containersFrom))
for i, c := range containersFrom {
r.Containers.Items[i] = *c
}
if du.BuildCacheUsage != nil {
if bdu := du.BuildCacheUsage; bdu != nil {
r.BuildCache = BuildCacheDiskUsage{
ActiveBuildCacheRecords: du.BuildCacheUsage.ActiveBuildCacheRecords,
Reclaimable: du.BuildCacheUsage.Reclaimable,
TotalBuildCacheRecords: du.BuildCacheUsage.TotalBuildCacheRecords,
TotalSize: du.BuildCacheUsage.TotalSize,
ActiveBuildCacheRecords: bdu.ActiveBuildCacheRecords,
Reclaimable: bdu.Reclaimable,
TotalBuildCacheRecords: bdu.TotalBuildCacheRecords,
TotalSize: bdu.TotalSize,
}
if options.Verbose {
buildCacheFrom = du.BuildCacheUsage.Items
r.BuildCache.Items = slices.Clone(bdu.Items)
}
} else if du.BuildCache != nil && options.Verbose {
// Fallback for legacy response.
buildCacheFrom = du.BuildCache
}
r.BuildCache.Items = make([]build.CacheRecord, len(buildCacheFrom))
for i, b := range buildCacheFrom {
r.BuildCache.Items[i] = *b
}
if du.VolumeUsage != nil {
if vdu := du.VolumeUsage; vdu != nil {
r.Volumes = VolumesDiskUsage{
ActiveVolumes: du.VolumeUsage.ActiveVolumes,
Reclaimable: du.VolumeUsage.Reclaimable,
TotalSize: du.VolumeUsage.TotalSize,
TotalVolumes: du.VolumeUsage.TotalVolumes,
ActiveVolumes: vdu.ActiveVolumes,
Reclaimable: vdu.Reclaimable,
TotalVolumes: vdu.TotalVolumes,
TotalSize: vdu.TotalSize,
}
if options.Verbose {
volumesFrom = du.VolumeUsage.Items
r.Volumes.Items = slices.Clone(vdu.Items)
}
} else if du.Volumes != nil && options.Verbose {
// Fallback for legacy response.
volumesFrom = du.Volumes
}
r.Volumes.Items = make([]volume.Volume, len(volumesFrom))
for i, v := range volumesFrom {
r.Volumes.Items[i] = *v
}
return r, nil
}
// diskUsageResultFromLegacyAPI reconstructs a DiskUsageResult from a legacy
// (API < v1.52) disk-usage response by recomputing each per-category summary
// on the client side.
func diskUsageResultFromLegacyAPI(du *system.DiskUsage) DiskUsageResult {
	var result DiskUsageResult
	result.Images = imageDiskUsageFromLegacyAPI(du)
	result.Containers = containerDiskUsageFromLegacyAPI(du)
	result.BuildCache = buildCacheDiskUsageFromLegacyAPI(du)
	result.Volumes = volumeDiskUsageFromLegacyAPI(du)
	return result
}
// imageDiskUsageFromLegacyAPI derives image disk-usage statistics from a
// legacy (API < v1.52) response, which only carries the raw image list and
// the total layers size.
func imageDiskUsageFromLegacyAPI(du *system.DiskUsage) ImagesDiskUsage {
	result := ImagesDiskUsage{
		TotalSize:   du.LayersSize,
		TotalImages: int64(len(du.Images)),
		Items:       du.Images,
	}
	var inUse int64
	for _, img := range result.Items {
		if img.Containers <= 0 {
			continue
		}
		result.ActiveImages++
		// A Size or SharedSize of -1 means "unknown"; leave those out of
		// the in-use accounting instead of skewing the total.
		if img.Size != -1 && img.SharedSize != -1 {
			inUse += img.Size - img.SharedSize
		}
	}
	// Reclaimable space is only meaningful when there are images at all.
	if result.TotalImages > 0 {
		result.Reclaimable = result.TotalSize - inUse
	}
	return result
}
// containerDiskUsageFromLegacyAPI derives container disk-usage statistics
// from a legacy (API < v1.52) response by summing each container's
// writable-layer size and classifying it by state.
func containerDiskUsageFromLegacyAPI(du *system.DiskUsage) ContainersDiskUsage {
	result := ContainersDiskUsage{
		TotalContainers: int64(len(du.Containers)),
		Items:           du.Containers,
	}
	var inUse int64
	for _, ctr := range result.Items {
		result.TotalSize += ctr.SizeRw
		// Only containers in an "active" state keep their space pinned.
		switch strings.ToLower(ctr.State) {
		case "running", "paused", "restarting":
			result.ActiveContainers++
			inUse += ctr.SizeRw
		}
	}
	result.Reclaimable = result.TotalSize - inUse
	return result
}
// buildCacheDiskUsageFromLegacyAPI derives build-cache disk-usage statistics
// from a legacy (API < v1.52) response. Shared records are excluded from the
// size totals (their space is accounted for elsewhere), but still count as
// active when in use.
func buildCacheDiskUsageFromLegacyAPI(du *system.DiskUsage) BuildCacheDiskUsage {
	result := BuildCacheDiskUsage{
		TotalBuildCacheRecords: int64(len(du.BuildCache)),
		Items:                  du.BuildCache,
	}
	var inUse int64
	for _, rec := range du.BuildCache {
		if rec.InUse {
			result.ActiveBuildCacheRecords++
		}
		if !rec.Shared {
			result.TotalSize += rec.Size
			if rec.InUse {
				inUse += rec.Size
			}
		}
	}
	result.Reclaimable = result.TotalSize - inUse
	return result
}
// volumeDiskUsageFromLegacyAPI derives volume disk-usage statistics from a
// legacy (API < v1.52) response. Volumes without usage data are counted in
// the total but contribute nothing to the size figures.
func volumeDiskUsageFromLegacyAPI(du *system.DiskUsage) VolumesDiskUsage {
	result := VolumesDiskUsage{
		TotalVolumes: int64(len(du.Volumes)),
		Items:        du.Volumes,
	}
	var inUse int64
	for _, vol := range result.Items {
		ud := vol.UsageData
		if ud == nil {
			// No usage data available for this volume; skip it.
			continue
		}
		if ud.RefCount > 0 {
			result.ActiveVolumes++
			inUse += ud.Size
		}
		if ud.Size > 0 {
			result.TotalSize += ud.Size
		}
	}
	result.Reclaimable = result.TotalSize - inUse
	return result
}

View File

@@ -33,7 +33,7 @@ func TestDiskUsage(t *testing.T) {
TotalImages: 0,
Reclaimable: 0,
TotalSize: 4096,
Items: []*image.Summary{},
Items: []image.Summary{},
},
})(req)
}))
@@ -131,19 +131,22 @@ func TestDiskUsageWithOptions(t *testing.T) {
}
func TestLegacyDiskUsage(t *testing.T) {
const legacyVersion = "1.51"
const expectedURL = "/system/df"
client, err := NewClientWithOpts(WithMockClient(func(req *http.Request) (*http.Response, error) {
if err := assertRequest(req, http.MethodGet, expectedURL); err != nil {
return nil, err
}
client, err := NewClientWithOpts(
WithVersion(legacyVersion),
WithMockClient(func(req *http.Request) (*http.Response, error) {
if err := assertRequest(req, http.MethodGet, "/v"+legacyVersion+expectedURL); err != nil {
return nil, err
}
return mockJSONResponse(http.StatusOK, nil, system.DiskUsage{
LegacyDiskUsage: system.LegacyDiskUsage{
LayersSize: 4096,
Images: []*image.Summary{},
},
})(req)
}))
return mockJSONResponse(http.StatusOK, nil, system.DiskUsage{
LegacyDiskUsage: system.LegacyDiskUsage{
LayersSize: 4096,
Images: []image.Summary{},
},
})(req)
}))
assert.NilError(t, err)
du, err := client.DiskUsage(context.Background(), DiskUsageOptions{})

View File

@@ -8,6 +8,7 @@ import (
"github.com/moby/moby/v2/daemon/internal/filters"
"github.com/moby/moby/v2/daemon/server/backend"
"github.com/moby/moby/v2/daemon/server/imagebackend"
"github.com/moby/moby/v2/internal/sliceutil"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
@@ -51,7 +52,7 @@ func (daemon *Daemon) containerDiskUsage(ctx context.Context, verbose bool) (*ba
du.ActiveCount = activeCount
if verbose {
du.Items = containers
du.Items = sliceutil.Deref(containers)
}
return du, nil
@@ -96,7 +97,7 @@ func (daemon *Daemon) imageDiskUsage(ctx context.Context, verbose bool) (*backen
du.ActiveCount = activeCount
if verbose {
du.Items = images
du.Items = sliceutil.Deref(images)
}
return du, nil
@@ -130,7 +131,7 @@ func (daemon *Daemon) localVolumesSize(ctx context.Context, verbose bool) (*back
du.ActiveCount = activeCount
if verbose {
du.Items = volumes
du.Items = sliceutil.Deref(volumes)
}
return du, nil

View File

@@ -150,15 +150,15 @@ func (b *Builder) Cancel(ctx context.Context, id string) error {
}
// DiskUsage returns a report about space used by build cache
func (b *Builder) DiskUsage(ctx context.Context) ([]*build.CacheRecord, error) {
func (b *Builder) DiskUsage(ctx context.Context) ([]build.CacheRecord, error) {
duResp, err := b.controller.DiskUsage(ctx, &controlapi.DiskUsageRequest{})
if err != nil {
return nil, err
}
var items []*build.CacheRecord
var items []build.CacheRecord
for _, r := range duResp.Record {
items = append(items, &build.CacheRecord{
items = append(items, build.CacheRecord{
ID: r.ID,
Parents: r.Parents,
Type: r.RecordType,

View File

@@ -37,7 +37,7 @@ type BuildCacheDiskUsage struct {
TotalCount int64
TotalSize int64
Reclaimable int64
Items []*build.CacheRecord
Items []build.CacheRecord
}
// ContainerDiskUsage contains disk usage for containers.
@@ -46,7 +46,7 @@ type ContainerDiskUsage struct {
TotalCount int64
TotalSize int64
Reclaimable int64
Items []*container.Summary
Items []container.Summary
}
// ImageDiskUsage contains disk usage for images.
@@ -55,7 +55,7 @@ type ImageDiskUsage struct {
TotalCount int64
TotalSize int64
Reclaimable int64
Items []*image.Summary
Items []image.Summary
}
// VolumeDiskUsage contains disk usage for volumes.
@@ -64,5 +64,5 @@ type VolumeDiskUsage struct {
TotalCount int64
TotalSize int64
Reclaimable int64
Items []*volume.Volume
Items []volume.Volume
}

View File

@@ -32,7 +32,7 @@ type ClusterBackend interface {
// BuildBackend provides build specific system information.
type BuildBackend interface {
DiskUsage(context.Context) ([]*build.CacheRecord, error)
DiskUsage(context.Context) ([]build.CacheRecord, error)
}
// StatusProvider provides methods to get the swarm status of the current node.

View File

@@ -196,7 +196,7 @@ func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter,
})
}
var buildCache []*buildtypes.CacheRecord
var buildCache []buildtypes.CacheRecord
if getBuildCache {
eg.Go(func() error {
var err error
@@ -204,11 +204,6 @@ func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter,
if err != nil {
return errors.Wrap(err, "error getting build cache usage")
}
if buildCache == nil {
// Ensure empty `BuildCache` field is represented as empty JSON array(`[]`)
// instead of `null` to be consistent with `Images`, `Containers` etc.
buildCache = []*buildtypes.CacheRecord{}
}
return nil
})
}
@@ -227,8 +222,8 @@ func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter,
}
if legacyFields {
v.LayersSize = systemDiskUsage.Images.TotalSize //nolint: staticcheck,SA1019: v.LayersSize is deprecated: kept to maintain backwards compatibility with API < v1.52, use [ImagesDiskUsage.TotalSize] instead.
v.Images = systemDiskUsage.Images.Items //nolint: staticcheck,SA1019: v.Images is deprecated: kept to maintain backwards compatibility with API < v1.52, use [ImagesDiskUsage.Items] instead.
v.LayersSize = systemDiskUsage.Images.TotalSize //nolint: staticcheck,SA1019: v.LayersSize is deprecated: kept to maintain backwards compatibility with API < v1.52, use [ImagesDiskUsage.TotalSize] instead.
v.Images = nonNilSlice(systemDiskUsage.Images.Items) //nolint: staticcheck,SA1019: v.Images is deprecated: kept to maintain backwards compatibility with API < v1.52, use [ImagesDiskUsage.Items] instead.
} else if verbose {
v.ImageUsage.Items = systemDiskUsage.Images.Items
}
@@ -242,7 +237,7 @@ func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter,
}
if legacyFields {
v.Containers = systemDiskUsage.Containers.Items //nolint: staticcheck,SA1019: v.Containers is deprecated: kept to maintain backwards compatibility with API < v1.52, use [ContainersDiskUsage.Items] instead.
v.Containers = nonNilSlice(systemDiskUsage.Containers.Items) //nolint: staticcheck,SA1019: v.Containers is deprecated: kept to maintain backwards compatibility with API < v1.52, use [ContainersDiskUsage.Items] instead.
} else if verbose {
v.ContainerUsage.Items = systemDiskUsage.Containers.Items
}
@@ -256,7 +251,7 @@ func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter,
}
if legacyFields {
v.Volumes = systemDiskUsage.Volumes.Items //nolint: staticcheck,SA1019: v.Volumes is deprecated: kept to maintain backwards compatibility with API < v1.52, use [VolumesDiskUsage.Items] instead.
v.Volumes = nonNilSlice(systemDiskUsage.Volumes.Items) //nolint: staticcheck,SA1019: v.Volumes is deprecated: kept to maintain backwards compatibility with API < v1.52, use [VolumesDiskUsage.Items] instead.
} else if verbose {
v.VolumeUsage.Items = systemDiskUsage.Volumes.Items
}
@@ -287,7 +282,7 @@ func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter,
v.BuildCacheUsage.Reclaimable = reclaimable
if legacyFields {
v.BuildCache = buildCache //nolint: staticcheck,SA1019: v.BuildCache is deprecated: kept to maintain backwards compatibility with API < v1.52, use [BuildCacheDiskUsage.Items] instead.
v.BuildCache = nonNilSlice(buildCache) //nolint: staticcheck,SA1019: v.BuildCache is deprecated: kept to maintain backwards compatibility with API < v1.52, use [BuildCacheDiskUsage.Items] instead.
} else if verbose {
v.BuildCacheUsage.Items = buildCache
}
@@ -295,6 +290,15 @@ func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter,
return httputils.WriteJSON(w, http.StatusOK, v)
}
// nonNilSlice is used for the legacy fields, which are either omitted
// entirely, or (if set), must return an empty slice in the response.
func nonNilSlice[T any](s []T) []T {
	if s != nil {
		return s
	}
	return []T{}
}
type invalidRequestError struct {
Err error
}

View File

@@ -5,10 +5,6 @@ import (
"testing"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/moby/moby/api/types/build"
containertypes "github.com/moby/moby/api/types/container"
"github.com/moby/moby/api/types/image"
"github.com/moby/moby/api/types/volume"
"github.com/moby/moby/client"
"github.com/moby/moby/v2/integration/internal/container"
"github.com/moby/moby/v2/internal/testutil"
@@ -56,19 +52,12 @@ func TestDiskUsage(t *testing.T) {
}
assert.DeepEqual(t, du, client.DiskUsageResult{
Containers: client.ContainersDiskUsage{
Items: []containertypes.Summary{},
},
Containers: client.ContainersDiskUsage{},
Images: client.ImagesDiskUsage{
TotalSize: expectedLayersSize,
Items: []image.Summary{},
},
BuildCache: client.BuildCacheDiskUsage{
Items: []build.CacheRecord{},
},
Volumes: client.VolumesDiskUsage{
Items: []volume.Volume{},
},
BuildCache: client.BuildCacheDiskUsage{},
Volumes: client.VolumesDiskUsage{},
})
return du
},
@@ -155,9 +144,9 @@ func TestDiskUsage(t *testing.T) {
},
expected: client.DiskUsageResult{
Containers: stepDU.Containers,
Images: client.ImagesDiskUsage{Items: []image.Summary{}},
BuildCache: client.BuildCacheDiskUsage{Items: []build.CacheRecord{}},
Volumes: client.VolumesDiskUsage{Items: []volume.Volume{}},
Images: client.ImagesDiskUsage{},
BuildCache: client.BuildCacheDiskUsage{},
Volumes: client.VolumesDiskUsage{},
},
},
{
@@ -167,10 +156,10 @@ func TestDiskUsage(t *testing.T) {
Verbose: true,
},
expected: client.DiskUsageResult{
Containers: client.ContainersDiskUsage{Items: []containertypes.Summary{}},
Containers: client.ContainersDiskUsage{},
Images: stepDU.Images,
BuildCache: client.BuildCacheDiskUsage{Items: []build.CacheRecord{}},
Volumes: client.VolumesDiskUsage{Items: []volume.Volume{}},
BuildCache: client.BuildCacheDiskUsage{},
Volumes: client.VolumesDiskUsage{},
},
},
{
@@ -180,9 +169,9 @@ func TestDiskUsage(t *testing.T) {
Verbose: true,
},
expected: client.DiskUsageResult{
Containers: client.ContainersDiskUsage{Items: []containertypes.Summary{}},
Images: client.ImagesDiskUsage{Items: []image.Summary{}},
BuildCache: client.BuildCacheDiskUsage{Items: []build.CacheRecord{}},
Containers: client.ContainersDiskUsage{},
Images: client.ImagesDiskUsage{},
BuildCache: client.BuildCacheDiskUsage{},
Volumes: stepDU.Volumes,
},
},
@@ -193,10 +182,10 @@ func TestDiskUsage(t *testing.T) {
Verbose: true,
},
expected: client.DiskUsageResult{
Containers: client.ContainersDiskUsage{Items: []containertypes.Summary{}},
Images: client.ImagesDiskUsage{Items: []image.Summary{}},
Containers: client.ContainersDiskUsage{},
Images: client.ImagesDiskUsage{},
BuildCache: stepDU.BuildCache,
Volumes: client.VolumesDiskUsage{Items: []volume.Volume{}},
Volumes: client.VolumesDiskUsage{},
},
},
{
@@ -208,8 +197,8 @@ func TestDiskUsage(t *testing.T) {
},
expected: client.DiskUsageResult{
Containers: stepDU.Containers,
Images: client.ImagesDiskUsage{Items: []image.Summary{}},
BuildCache: client.BuildCacheDiskUsage{Items: []build.CacheRecord{}},
Images: client.ImagesDiskUsage{},
BuildCache: client.BuildCacheDiskUsage{},
Volumes: stepDU.Volumes,
},
},
@@ -221,10 +210,10 @@ func TestDiskUsage(t *testing.T) {
Verbose: true,
},
expected: client.DiskUsageResult{
Containers: client.ContainersDiskUsage{Items: []containertypes.Summary{}},
Containers: client.ContainersDiskUsage{},
Images: stepDU.Images,
BuildCache: stepDU.BuildCache,
Volumes: client.VolumesDiskUsage{Items: []volume.Volume{}},
Volumes: client.VolumesDiskUsage{},
},
},
{
@@ -237,9 +226,7 @@ func TestDiskUsage(t *testing.T) {
},
expected: client.DiskUsageResult{
Containers: stepDU.Containers,
Images: client.ImagesDiskUsage{
Items: []image.Summary{},
},
Images: client.ImagesDiskUsage{},
BuildCache: stepDU.BuildCache,
Volumes: stepDU.Volumes,
},
@@ -253,9 +240,7 @@ func TestDiskUsage(t *testing.T) {
Verbose: true,
},
expected: client.DiskUsageResult{
Containers: client.ContainersDiskUsage{
Items: []containertypes.Summary{},
},
Containers: client.ContainersDiskUsage{},
Images: stepDU.Images,
BuildCache: stepDU.BuildCache,
Volumes: stepDU.Volumes,
@@ -272,10 +257,8 @@ func TestDiskUsage(t *testing.T) {
expected: client.DiskUsageResult{
Containers: stepDU.Containers,
Images: stepDU.Images,
BuildCache: client.BuildCacheDiskUsage{
Items: []build.CacheRecord{},
},
Volumes: stepDU.Volumes,
BuildCache: client.BuildCacheDiskUsage{},
Volumes: stepDU.Volumes,
},
},
{

View File

@@ -1,5 +1,18 @@
package sliceutil
// Deref converts a slice of pointers into a slice of values, silently
// dropping any nil entries. A nil input yields a nil result.
func Deref[T any](slice []*T) []T {
	if slice == nil {
		return nil
	}
	result := make([]T, 0, len(slice))
	for _, ptr := range slice {
		if ptr == nil {
			continue
		}
		result = append(result, *ptr)
	}
	return result
}
func Dedup[T comparable](slice []T) []T {
keys := make(map[T]struct{})
out := make([]T, 0, len(slice))

View File

@@ -17,7 +17,7 @@ type DiskUsage struct {
// List of build cache records.
//
Items []*CacheRecord `json:"Items,omitempty"`
Items []CacheRecord `json:"Items,omitempty"`
// Disk space that can be reclaimed by removing inactive build cache records.
//

View File

@@ -17,7 +17,7 @@ type DiskUsage struct {
// List of container summaries.
//
Items []*Summary `json:"Items,omitempty"`
Items []Summary `json:"Items,omitempty"`
// Disk space that can be reclaimed by removing inactive containers.
//

View File

@@ -17,7 +17,7 @@ type DiskUsage struct {
// List of image summaries.
//
Items []*Summary `json:"Items,omitempty"`
Items []Summary `json:"Items,omitempty"`
// Disk space that can be reclaimed by removing unused images.
//

View File

@@ -37,14 +37,14 @@ type LegacyDiskUsage struct {
LayersSize int64 `json:"LayersSize,omitempty"`
// Deprecated: kept to maintain backwards compatibility with API < v1.52, use [ImagesDiskUsage.Items] instead.
Images []*image.Summary `json:"Images,omitempty"`
Images []image.Summary `json:"Images,omitzero"`
// Deprecated: kept to maintain backwards compatibility with API < v1.52, use [ContainersDiskUsage.Items] instead.
Containers []*container.Summary `json:"Containers,omitempty"`
Containers []container.Summary `json:"Containers,omitzero"`
// Deprecated: kept to maintain backwards compatibility with API < v1.52, use [VolumesDiskUsage.Items] instead.
Volumes []*volume.Volume `json:"Volumes,omitempty"`
Volumes []volume.Volume `json:"Volumes,omitzero"`
// Deprecated: kept to maintain backwards compatibility with API < v1.52, use [BuildCacheDiskUsage.Items] instead.
BuildCache []*build.CacheRecord `json:"BuildCache,omitempty"`
BuildCache []build.CacheRecord `json:"BuildCache,omitzero"`
}

View File

@@ -17,7 +17,7 @@ type DiskUsage struct {
// List of volumes.
//
Items []*Volume `json:"Items,omitempty"`
Items []Volume `json:"Items,omitempty"`
// Disk space that can be reclaimed by removing inactive volumes.
//

View File

@@ -5,12 +5,15 @@ import (
"encoding/json"
"fmt"
"net/url"
"slices"
"strings"
"github.com/moby/moby/api/types/build"
"github.com/moby/moby/api/types/container"
"github.com/moby/moby/api/types/image"
"github.com/moby/moby/api/types/system"
"github.com/moby/moby/api/types/volume"
"github.com/moby/moby/client/pkg/versions"
)
// DiskUsageOptions holds parameters for [Client.DiskUsage] operations.
@@ -151,103 +154,166 @@ func (cli *Client) DiskUsage(ctx context.Context, options DiskUsageOptions) (Dis
return DiskUsageResult{}, fmt.Errorf("Error retrieving disk usage: %v", err)
}
var (
r DiskUsageResult
imagesFrom = []*image.Summary{}
containersFrom = []*container.Summary{}
volumesFrom = []*volume.Volume{}
buildCacheFrom = []*build.CacheRecord{}
)
// Generate result from a legacy response.
if versions.LessThan(cli.version, "1.52") {
return diskUsageResultFromLegacyAPI(&du), nil
}
if du.ImageUsage != nil {
var r DiskUsageResult
if idu := du.ImageUsage; idu != nil {
r.Images = ImagesDiskUsage{
ActiveImages: du.ImageUsage.ActiveImages,
Reclaimable: du.ImageUsage.Reclaimable,
TotalImages: du.ImageUsage.TotalImages,
TotalSize: du.ImageUsage.TotalSize,
ActiveImages: idu.ActiveImages,
Reclaimable: idu.Reclaimable,
TotalImages: idu.TotalImages,
TotalSize: idu.TotalSize,
}
if options.Verbose {
imagesFrom = du.ImageUsage.Items
}
} else {
// Fallback for legacy response.
r.Images = ImagesDiskUsage{
TotalSize: du.LayersSize,
}
if du.Images != nil && options.Verbose {
imagesFrom = du.Images
r.Images.Items = slices.Clone(idu.Items)
}
}
r.Images.Items = make([]image.Summary, len(imagesFrom))
for i, ii := range imagesFrom {
r.Images.Items[i] = *ii
}
if du.ContainerUsage != nil {
if cdu := du.ContainerUsage; cdu != nil {
r.Containers = ContainersDiskUsage{
ActiveContainers: du.ContainerUsage.ActiveContainers,
Reclaimable: du.ContainerUsage.Reclaimable,
TotalContainers: du.ContainerUsage.TotalContainers,
TotalSize: du.ContainerUsage.TotalSize,
ActiveContainers: cdu.ActiveContainers,
Reclaimable: cdu.Reclaimable,
TotalContainers: cdu.TotalContainers,
TotalSize: cdu.TotalSize,
}
if options.Verbose {
containersFrom = du.ContainerUsage.Items
r.Containers.Items = slices.Clone(cdu.Items)
}
} else if du.Containers != nil && options.Verbose {
// Fallback for legacy response.
containersFrom = du.Containers
}
r.Containers.Items = make([]container.Summary, len(containersFrom))
for i, c := range containersFrom {
r.Containers.Items[i] = *c
}
if du.BuildCacheUsage != nil {
if bdu := du.BuildCacheUsage; bdu != nil {
r.BuildCache = BuildCacheDiskUsage{
ActiveBuildCacheRecords: du.BuildCacheUsage.ActiveBuildCacheRecords,
Reclaimable: du.BuildCacheUsage.Reclaimable,
TotalBuildCacheRecords: du.BuildCacheUsage.TotalBuildCacheRecords,
TotalSize: du.BuildCacheUsage.TotalSize,
ActiveBuildCacheRecords: bdu.ActiveBuildCacheRecords,
Reclaimable: bdu.Reclaimable,
TotalBuildCacheRecords: bdu.TotalBuildCacheRecords,
TotalSize: bdu.TotalSize,
}
if options.Verbose {
buildCacheFrom = du.BuildCacheUsage.Items
r.BuildCache.Items = slices.Clone(bdu.Items)
}
} else if du.BuildCache != nil && options.Verbose {
// Fallback for legacy response.
buildCacheFrom = du.BuildCache
}
r.BuildCache.Items = make([]build.CacheRecord, len(buildCacheFrom))
for i, b := range buildCacheFrom {
r.BuildCache.Items[i] = *b
}
if du.VolumeUsage != nil {
if vdu := du.VolumeUsage; vdu != nil {
r.Volumes = VolumesDiskUsage{
ActiveVolumes: du.VolumeUsage.ActiveVolumes,
Reclaimable: du.VolumeUsage.Reclaimable,
TotalSize: du.VolumeUsage.TotalSize,
TotalVolumes: du.VolumeUsage.TotalVolumes,
ActiveVolumes: vdu.ActiveVolumes,
Reclaimable: vdu.Reclaimable,
TotalVolumes: vdu.TotalVolumes,
TotalSize: vdu.TotalSize,
}
if options.Verbose {
volumesFrom = du.VolumeUsage.Items
r.Volumes.Items = slices.Clone(vdu.Items)
}
} else if du.Volumes != nil && options.Verbose {
// Fallback for legacy response.
volumesFrom = du.Volumes
}
r.Volumes.Items = make([]volume.Volume, len(volumesFrom))
for i, v := range volumesFrom {
r.Volumes.Items[i] = *v
}
return r, nil
}
// diskUsageResultFromLegacyAPI reconstructs a DiskUsageResult from a legacy
// (API < v1.52) disk-usage response by recomputing each per-category summary
// on the client side.
func diskUsageResultFromLegacyAPI(du *system.DiskUsage) DiskUsageResult {
	var result DiskUsageResult
	result.Images = imageDiskUsageFromLegacyAPI(du)
	result.Containers = containerDiskUsageFromLegacyAPI(du)
	result.BuildCache = buildCacheDiskUsageFromLegacyAPI(du)
	result.Volumes = volumeDiskUsageFromLegacyAPI(du)
	return result
}
// imageDiskUsageFromLegacyAPI derives image disk-usage statistics from a
// legacy (API < v1.52) response, which only carries the raw image list and
// the total layers size.
func imageDiskUsageFromLegacyAPI(du *system.DiskUsage) ImagesDiskUsage {
	result := ImagesDiskUsage{
		TotalSize:   du.LayersSize,
		TotalImages: int64(len(du.Images)),
		Items:       du.Images,
	}
	var inUse int64
	for _, img := range result.Items {
		if img.Containers <= 0 {
			continue
		}
		result.ActiveImages++
		// A Size or SharedSize of -1 means "unknown"; leave those out of
		// the in-use accounting instead of skewing the total.
		if img.Size != -1 && img.SharedSize != -1 {
			inUse += img.Size - img.SharedSize
		}
	}
	// Reclaimable space is only meaningful when there are images at all.
	if result.TotalImages > 0 {
		result.Reclaimable = result.TotalSize - inUse
	}
	return result
}
// containerDiskUsageFromLegacyAPI derives container disk-usage statistics
// from a legacy (API < v1.52) response by summing each container's
// writable-layer size and classifying it by state.
func containerDiskUsageFromLegacyAPI(du *system.DiskUsage) ContainersDiskUsage {
	result := ContainersDiskUsage{
		TotalContainers: int64(len(du.Containers)),
		Items:           du.Containers,
	}
	var inUse int64
	for _, ctr := range result.Items {
		result.TotalSize += ctr.SizeRw
		// Only containers in an "active" state keep their space pinned.
		switch strings.ToLower(ctr.State) {
		case "running", "paused", "restarting":
			result.ActiveContainers++
			inUse += ctr.SizeRw
		}
	}
	result.Reclaimable = result.TotalSize - inUse
	return result
}
// buildCacheDiskUsageFromLegacyAPI derives build-cache disk-usage statistics
// from a legacy (API < v1.52) response. Shared records are excluded from the
// size totals (their space is accounted for elsewhere), but still count as
// active when in use.
func buildCacheDiskUsageFromLegacyAPI(du *system.DiskUsage) BuildCacheDiskUsage {
	result := BuildCacheDiskUsage{
		TotalBuildCacheRecords: int64(len(du.BuildCache)),
		Items:                  du.BuildCache,
	}
	var inUse int64
	for _, rec := range du.BuildCache {
		if rec.InUse {
			result.ActiveBuildCacheRecords++
		}
		if !rec.Shared {
			result.TotalSize += rec.Size
			if rec.InUse {
				inUse += rec.Size
			}
		}
	}
	result.Reclaimable = result.TotalSize - inUse
	return result
}
// volumeDiskUsageFromLegacyAPI derives volume disk-usage statistics from a
// legacy (API < v1.52) response. Volumes without usage data are counted in
// the total but contribute nothing to the size figures.
func volumeDiskUsageFromLegacyAPI(du *system.DiskUsage) VolumesDiskUsage {
	result := VolumesDiskUsage{
		TotalVolumes: int64(len(du.Volumes)),
		Items:        du.Volumes,
	}
	var inUse int64
	for _, vol := range result.Items {
		ud := vol.UsageData
		if ud == nil {
			// No usage data available for this volume; skip it.
			continue
		}
		if ud.RefCount > 0 {
			result.ActiveVolumes++
			inUse += ud.Size
		}
		if ud.Size > 0 {
			result.TotalSize += ud.Size
		}
	}
	result.Reclaimable = result.TotalSize - inUse
	return result
}