diff --git a/builder/builder-next/builder.go b/builder/builder-next/builder.go index d77b87c8e1..7668bf86df 100644 --- a/builder/builder-next/builder.go +++ b/builder/builder-next/builder.go @@ -97,6 +97,7 @@ type Opt struct { ContainerdAddress string ContainerdNamespace string Callbacks exporter.BuildkitCallbacks + CDISpecDirs []string } // Builder can build using BuildKit backend diff --git a/builder/builder-next/controller.go b/builder/builder-next/controller.go index 103df435d3..53b30569db 100644 --- a/builder/builder-next/controller.go +++ b/builder/builder-next/controller.go @@ -45,6 +45,7 @@ import ( containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/bboltcachestorage" + "github.com/moby/buildkit/solver/llbsolver/cdidevices" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/apicaps" "github.com/moby/buildkit/util/archutil" @@ -59,6 +60,7 @@ import ( "go.etcd.io/bbolt" bolt "go.etcd.io/bbolt" "go.opentelemetry.io/otel/sdk/trace" + "tags.cncf.io/container-device-interface/pkg/cdi" ) func newController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control.Controller, error) { @@ -110,6 +112,11 @@ func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt dns := getDNSConfig(opt.DNSConfig) + cdiManager, err := getCDIManager(opt.CDISpecDirs) + if err != nil { + return nil, err + } + workerOpts := containerd.WorkerOptions{ Root: opt.Root, Address: opt.ContainerdAddress, @@ -123,6 +130,7 @@ func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt NetworkOpt: nc, ApparmorProfile: opt.ApparmorProfile, Selinux: false, + CDIManager: cdiManager, } wo, err := containerd.NewWorkerOpt(workerOpts, ctd.WithTimeout(60*time.Second)) @@ -144,7 +152,7 @@ func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt wo.RegistryHosts = opt.RegistryHosts wo.Labels = getLabels(opt, wo.Labels) - exec, err := newExecutor(opt.Root, opt.DefaultCgroupParent, opt.NetworkController, dns, opt.Rootless, opt.IdentityMapping, opt.ApparmorProfile) + exec, err := newExecutor(opt.Root, opt.DefaultCgroupParent, opt.NetworkController, dns, opt.Rootless, opt.IdentityMapping, opt.ApparmorProfile, cdiManager) if err != nil { return nil, err } @@ -318,7 +326,12 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt dns := getDNSConfig(opt.DNSConfig) - exec, err := newExecutor(root, opt.DefaultCgroupParent, opt.NetworkController, dns, opt.Rootless, opt.IdentityMapping, opt.ApparmorProfile) + cdiManager, err := getCDIManager(opt.CDISpecDirs) + if err != nil { + return nil, err + } + + exec, err := newExecutor(root, opt.DefaultCgroupParent, opt.NetworkController, dns, opt.Rootless, opt.IdentityMapping, opt.ApparmorProfile, cdiManager) if err != nil { return nil, err } @@ -386,6 +399,7 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt LeaseManager: lm, GarbageCollect: mdb.GarbageCollect, Labels: getLabels(opt, nil), + CDIManager: cdiManager, } wc := &worker.Controller{} @@ -529,3 +543,24 @@ func getLabels(opt Opt, labels map[string]string) map[string]string { } return labels } + +func getCDIManager(specDirs []string) (*cdidevices.Manager, error) { + // TODO: intentionally not returning nil here on empty specDirs as not handled in all code-paths yet + cdiCache, err := func() (*cdi.Cache, error) { + cdiCache, err := cdi.NewCache( + cdi.WithSpecDirs(specDirs...), + cdi.WithAutoRefresh(false), + ) + if 
err != nil { + return nil, err + } + if err := cdiCache.Refresh(); err != nil { + return nil, err + } + return cdiCache, nil + }() + if err != nil { + return nil, errors.Wrapf(err, "CDI registry initialization failure") + } + return cdidevices.NewManager(cdiCache), nil +} diff --git a/builder/builder-next/executor_linux.go b/builder/builder-next/executor_linux.go index 5e9b06ae6a..b81fe8fab7 100644 --- a/builder/builder-next/executor_linux.go +++ b/builder/builder-next/executor_linux.go @@ -19,6 +19,7 @@ import ( resourcestypes "github.com/moby/buildkit/executor/resources/types" "github.com/moby/buildkit/executor/runcexecutor" "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/solver/llbsolver/cdidevices" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/network" "github.com/opencontainers/runtime-spec/specs-go" @@ -26,7 +27,7 @@ import ( const networkName = "bridge" -func newExecutor(root, cgroupParent string, net *libnetwork.Controller, dnsConfig *oci.DNSConfig, rootless bool, idmap idtools.IdentityMapping, apparmorProfile string) (executor.Executor, error) { +func newExecutor(root, cgroupParent string, net *libnetwork.Controller, dnsConfig *oci.DNSConfig, rootless bool, idmap idtools.IdentityMapping, apparmorProfile string, cdiManager *cdidevices.Manager) (executor.Executor, error) { netRoot := filepath.Join(root, "net") networkProviders := map[pb.NetMode]network.Provider{ pb.NetMode_UNSET: &bridgeProvider{Controller: net, Root: netRoot}, @@ -74,6 +75,7 @@ func newExecutor(root, cgroupParent string, net *libnetwork.Controller, dnsConfi DNS: dnsConfig, ApparmorProfile: apparmorProfile, ResourceMonitor: rm, + CDIManager: cdiManager, }, networkProviders) } diff --git a/builder/builder-next/executor_nolinux.go b/builder/builder-next/executor_nolinux.go index ef396376ea..ca0b6bc134 100644 --- a/builder/builder-next/executor_nolinux.go +++ b/builder/builder-next/executor_nolinux.go @@ -13,9 +13,10 @@ import ( "github.com/moby/buildkit/executor" "github.com/moby/buildkit/executor/oci" resourcetypes "github.com/moby/buildkit/executor/resources/types" + "github.com/moby/buildkit/solver/llbsolver/cdidevices" ) -func newExecutor(_, _ string, _ *libnetwork.Controller, _ *oci.DNSConfig, _ bool, _ idtools.IdentityMapping, _ string) (executor.Executor, error) { +func newExecutor(_, _ string, _ *libnetwork.Controller, _ *oci.DNSConfig, _ bool, _ idtools.IdentityMapping, _ string, _ *cdidevices.Manager) (executor.Executor, error) { return &stubExecutor{}, nil } diff --git a/builder/builder-next/worker/worker.go b/builder/builder-next/worker/worker.go index 9d1f0ee4f7..8bdb0aae05 100644 --- a/builder/builder-next/worker/worker.go +++ b/builder/builder-next/worker/worker.go @@ -35,6 +35,7 @@ import ( "github.com/moby/buildkit/snapshot" containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver/cdidevices" "github.com/moby/buildkit/solver/llbsolver/mounts" "github.com/moby/buildkit/solver/llbsolver/ops" "github.com/moby/buildkit/solver/pb" @@ -86,6 +87,7 @@ type Opt struct { Exporter exporter.Exporter Layers LayerAccess Platforms []ocispec.Platform + CDIManager *cdidevices.Manager } // Worker is a local worker instance with dedicated snapshotter, cache, and so on. 
@@ -480,6 +482,10 @@ func (w *Worker) CacheManager() cache.Manager { return w.Opt.CacheManager } +func (w *Worker) CDIManager() *cdidevices.Manager { + return w.Opt.CDIManager +} + type discardProgress struct{} func (*discardProgress) WriteProgress(_ pkgprogress.Progress) error { diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go index 7ecd629d7e..38a52a10b2 100644 --- a/cmd/dockerd/daemon.go +++ b/cmd/dockerd/daemon.go @@ -400,6 +400,11 @@ func initBuildkit(ctx context.Context, d *daemon.Daemon) (_ builderOptions, clos return builderOptions{}, closeFn, err } + var cdiSpecDirs []string + if d.Features()["cdi"] { + cdiSpecDirs = d.Config().CDISpecDirs + } + cfg := d.Config() bk, err := buildkit.New(ctx, buildkit.Opt{ SessionManager: sm, @@ -423,6 +428,7 @@ func initBuildkit(ctx context.Context, d *daemon.Daemon) (_ builderOptions, clos Exported: d.ImageExportedByBuildkit, Named: d.ImageNamedByBuildkit, }, + CDISpecDirs: cdiSpecDirs, }) if err != nil { return builderOptions{}, closeFn, errors.Wrap(err, "error creating buildkit instance") diff --git a/vendor.mod b/vendor.mod index d0482a7b89..d707b57d71 100644 --- a/vendor.mod +++ b/vendor.mod @@ -63,7 +63,7 @@ require ( github.com/miekg/dns v1.1.57 github.com/mistifyio/go-zfs/v3 v3.0.1 github.com/mitchellh/copystructure v1.2.0 - github.com/moby/buildkit v0.19.0 + github.com/moby/buildkit v0.20.0-rc1.0.20250212151618-2f7007eec399 github.com/moby/docker-image-spec v1.3.1 github.com/moby/ipvs v1.1.0 github.com/moby/locker v1.0.1 @@ -121,6 +121,9 @@ require ( cloud.google.com/go v0.112.0 // indirect cloud.google.com/go/longrunning v0.5.4 // indirect github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 // indirect @@ -203,7 +206,7 @@ require ( github.com/tinylib/msgp v1.1.8 // indirect github.com/tonistiigi/dchapes-mode v0.0.0-20241001053921-ca0759fec205 // indirect github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a // indirect - github.com/tonistiigi/go-actions-cache v0.0.0-20241210095730-017636a73805 // indirect + github.com/tonistiigi/go-actions-cache v0.0.0-20250211194249-bd99cf5bbc65 // indirect github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 // indirect github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab // indirect diff --git a/vendor.sum b/vendor.sum index ad584ec7a6..053ffd2bd8 100644 --- a/vendor.sum +++ b/vendor.sum @@ -17,8 +17,20 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8af github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2 h1:dIScnXFlF784X79oi7MzVT6GWqr/W1uUt0pB5CsDs9M= github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2/go.mod h1:gCLVsLfv1egrcZu+GoJATN5ts75F2s62ih/457eWzOw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod 
h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 h1:mlmW46Q0B79I+Aj4azKC6xDMFN9a9SyZWESlGWYXbFs= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0/go.mod h1:PXe2h+LKcWTX9afWdZoHyODqR4fBa5boUM/8uJfZ0Jo= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0 h1:cOjLyhBhe91glgZZNbQUg9BJC57l6BiSKov0Ivv7k0U= @@ -163,8 +175,12 @@ github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/cli v27.5.1+incompatible h1:JB9cieUT9YNiMITtIsguaN55PLOHhBSz3LKVc6cqWaY= +github.com/docker/cli v27.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= +github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= @@ -367,8 +383,8 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs= -github.com/moby/buildkit v0.19.0 h1:w9G1p7sArvCGNkpWstAqJfRQTXBKukMyMK1bsah1HNo= -github.com/moby/buildkit v0.19.0/go.mod 
h1:WiHBFTgWV8eB1AmPxIWsAlKjUACAwm3X/14xOV4VWew= +github.com/moby/buildkit v0.20.0-rc1.0.20250212151618-2f7007eec399 h1:5D+c0/9DQkncvcLqFGYu7it8riR9kTonRIQpgUUg6r4= +github.com/moby/buildkit v0.20.0-rc1.0.20250212151618-2f7007eec399/go.mod h1:mtRqVBkksyvFm+ljU1u+cigDh36TdFvlEGfz/XbYTiI= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/ipvs v1.1.0 h1:ONN4pGaZQgAx+1Scz5RvWV4Q7Gb+mvfRh3NsPS+1XQQ= @@ -448,6 +464,8 @@ github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee h1:P6U24L02WMfj9ym github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee/go.mod h1:3uODdxMgOaPYeWU7RzZLxVtJHZ/x1f/iHkBZuKJDzuY= github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -536,8 +554,8 @@ github.com/tonistiigi/dchapes-mode v0.0.0-20241001053921-ca0759fec205 h1:eUk79E1 github.com/tonistiigi/dchapes-mode v0.0.0-20241001053921-ca0759fec205/go.mod h1:3Iuxbr0P7D3zUzBMAZB+ois3h/et0shEz0qApgHYGpY= github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a h1:EfGw4G0x/8qXWgtcZ6KVaPS+wpWOQMaypczzP8ojkMY= github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a/go.mod h1:Dl/9oEjK7IqnjAm21Okx/XIxUCFJzvh+XdVHUlBwXTw= -github.com/tonistiigi/go-actions-cache v0.0.0-20241210095730-017636a73805 h1:l2x1Ubj8f5xhPzZI428ZQ6+BDafGovpdk2ITnD3twTw= -github.com/tonistiigi/go-actions-cache v0.0.0-20241210095730-017636a73805/go.mod h1:xsu+XeKT9piH/5f9Y1Zsv5krQqI34CWkIusbs5027IM= +github.com/tonistiigi/go-actions-cache v0.0.0-20250211194249-bd99cf5bbc65 h1:57xLt2zJ6in5Au6plQUKm1gfasse4j3h9lrvoor2xPs= +github.com/tonistiigi/go-actions-cache v0.0.0-20250211194249-bd99cf5bbc65/go.mod h1:h0oRlVs3NoFIHysRQ4rU1+RG4QmU0M2JVSwTYrB4igk= github.com/tonistiigi/go-archvariant v1.0.0 h1:5LC1eDWiBNflnTF1prCiX09yfNHIxDC/aukdhCdTyb0= github.com/tonistiigi/go-archvariant v1.0.0/go.mod h1:TxFmO5VS6vMq2kvs3ht04iPXtu2rUT/erOnGFYfk5Ho= github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 h1:7I5c2Ig/5FgqkYOh/N87NzoyI9U15qUPXhDD8uCupv8= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md new file mode 100644 index 0000000000..f88b277ab6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -0,0 +1,842 @@ +# Release History + +## 1.16.0 (2024-10-17) + +### Features Added + +* Added field `Kind` to `runtime.StartSpanOptions` to allow a kind to be set when starting a span. + +### Bugs Fixed + +* `BearerTokenPolicy` now rewinds request bodies before retrying + +## 1.15.0 (2024-10-14) + +### Features Added + +* `BearerTokenPolicy` handles CAE claims challenges + +### Bugs Fixed + +* Omit the `ResponseError.RawResponse` field from JSON marshaling so instances can be marshaled. +* Fixed an integer overflow in the retry policy. + +### Other Changes + +* Update dependencies. 
+ +## 1.14.0 (2024-08-07) + +### Features Added + +* Added field `Attributes` to `runtime.StartSpanOptions` to simplify creating spans with attributes. + +### Other Changes + +* Include the HTTP verb and URL in `log.EventRetryPolicy` log entries so it's clear which operation is being retried. + +## 1.13.0 (2024-07-16) + +### Features Added + +- Added runtime.NewRequestFromRequest(), allowing for a policy.Request to be created from an existing *http.Request. + +## 1.12.0 (2024-06-06) + +### Features Added + +* Added field `StatusCodes` to `runtime.FetcherForNextLinkOptions` allowing for additional HTTP status codes indicating success. +* Added func `NewUUID` to the `runtime` package for generating UUIDs. + +### Bugs Fixed + +* Fixed an issue that prevented pollers using the `Operation-Location` strategy from unmarshaling the final result in some cases. + +### Other Changes + +* Updated dependencies. + +## 1.11.1 (2024-04-02) + +### Bugs Fixed + +* Pollers that use the `Location` header won't consider `http.StatusRequestTimeout` a terminal failure. +* `runtime.Poller[T].Result` won't consider non-terminal error responses as terminal. + +## 1.11.0 (2024-04-01) + +### Features Added + +* Added `StatusCodes` to `arm/policy.RegistrationOptions` to allow supporting non-standard HTTP status codes during registration. +* Added field `InsecureAllowCredentialWithHTTP` to `azcore.ClientOptions` and dependent authentication pipeline policies. +* Added type `MultipartContent` to the `streaming` package to support multipart/form payloads with custom Content-Type and file name. + +### Bugs Fixed + +* `runtime.SetMultipartFormData` won't try to stringify `[]byte` values. +* Pollers that use the `Location` header won't consider `http.StatusTooManyRequests` a terminal failure. + +### Other Changes + +* Update dependencies. + +## 1.10.0 (2024-02-29) + +### Features Added + +* Added logging event `log.EventResponseError` that will contain the contents of `ResponseError.Error()` whenever an `azcore.ResponseError` is created. +* Added `runtime.NewResponseErrorWithErrorCode` for creating an `azcore.ResponseError` with a caller-supplied error code. +* Added type `MatchConditions` for use in conditional requests. + +### Bugs Fixed + +* Fixed a potential race condition between `NullValue` and `IsNullValue`. +* `runtime.EncodeQueryParams` will escape semicolons before calling `url.ParseQuery`. + +### Other Changes + +* Update dependencies. + +## 1.9.2 (2024-02-06) + +### Bugs Fixed + +* `runtime.MarshalAsByteArray` and `runtime.MarshalAsJSON` will preserve the preexisting value of the `Content-Type` header. + +### Other Changes + +* Update to latest version of `internal`. + +## 1.9.1 (2023-12-11) + +### Bugs Fixed + +* The `retry-after-ms` and `x-ms-retry-after-ms` headers weren't being checked during retries. + +### Other Changes + +* Update dependencies. + +## 1.9.0 (2023-11-06) + +### Breaking Changes +> These changes affect only code written against previous beta versions of `v1.7.0` and `v1.8.0` +* The function `NewTokenCredential` has been removed from the `fake` package. Use a literal `&fake.TokenCredential{}` instead. +* The field `TracingNamespace` in `runtime.PipelineOptions` has been replaced by `TracingOptions`. + +### Bugs Fixed + +* Fixed an issue that could cause some allowed HTTP header values to not show up in logs. +* Include error text instead of error type in traces when the transport returns an error. 
+* Fixed an issue that could cause an HTTP/2 request to hang when the TCP connection becomes unresponsive. +* Block key and SAS authentication for non TLS protected endpoints. +* Passing a `nil` credential value will no longer cause a panic. Instead, the authentication is skipped. +* Calling `Error` on a zero-value `azcore.ResponseError` will no longer panic. +* Fixed an issue in `fake.PagerResponder[T]` that would cause a trailing error to be omitted when iterating over pages. +* Context values created by `azcore` will no longer flow across disjoint HTTP requests. + +### Other Changes + +* Skip generating trace info for no-op tracers. +* The `clientName` paramater in client constructors has been renamed to `moduleName`. + +## 1.9.0-beta.1 (2023-10-05) + +### Other Changes + +* The beta features for tracing and fakes have been reinstated. + +## 1.8.0 (2023-10-05) + +### Features Added + +* This includes the following features from `v1.8.0-beta.N` releases. + * Claims and CAE for authentication. + * New `messaging` package. + * Various helpers in the `runtime` package. + * Deprecation of `runtime.With*` funcs and their replacements in the `policy` package. +* Added types `KeyCredential` and `SASCredential` to the `azcore` package. + * Includes their respective constructor functions. +* Added types `KeyCredentialPolicy` and `SASCredentialPolicy` to the `azcore/runtime` package. + * Includes their respective constructor functions and options types. + +### Breaking Changes +> These changes affect only code written against beta versions of `v1.8.0` +* The beta features for tracing and fakes have been omitted for this release. + +### Bugs Fixed + +* Fixed an issue that could cause some ARM RPs to not be automatically registered. +* Block bearer token authentication for non TLS protected endpoints. + +### Other Changes + +* Updated dependencies. + +## 1.8.0-beta.3 (2023-09-07) + +### Features Added + +* Added function `FetcherForNextLink` and `FetcherForNextLinkOptions` to the `runtime` package to centralize creation of `Pager[T].Fetcher` from a next link URL. + +### Bugs Fixed + +* Suppress creating spans for nested SDK API calls. The HTTP span will be a child of the outer API span. + +### Other Changes + +* The following functions in the `runtime` package are now exposed from the `policy` package, and the `runtime` versions have been deprecated. + * `WithCaptureResponse` + * `WithHTTPHeader` + * `WithRetryOptions` + +## 1.7.2 (2023-09-06) + +### Bugs Fixed + +* Fix default HTTP transport to work in WASM modules. + +## 1.8.0-beta.2 (2023-08-14) + +### Features Added + +* Added function `SanitizePagerPollerPath` to the `server` package to centralize sanitization and formalize the contract. +* Added `TokenRequestOptions.EnableCAE` to indicate whether to request a CAE token. + +### Breaking Changes + +> This change affects only code written against beta version `v1.8.0-beta.1`. +* `messaging.CloudEvent` deserializes JSON objects as `[]byte`, instead of `json.RawMessage`. See the documentation for CloudEvent.Data for more information. + +> This change affects only code written against beta versions `v1.7.0-beta.2` and `v1.8.0-beta.1`. +* Removed parameter from method `Span.End()` and its type `tracing.SpanEndOptions`. This API GA'ed in `v1.2.0` so we cannot change it. + +### Bugs Fixed + +* Propagate any query parameters when constructing a fake poller and/or injecting next links. + +## 1.7.1 (2023-08-14) + +## Bugs Fixed + +* Enable TLS renegotiation in the default transport policy. 
+ +## 1.8.0-beta.1 (2023-07-12) + +### Features Added + +- `messaging/CloudEvent` allows you to serialize/deserialize CloudEvents, as described in the CloudEvents 1.0 specification: [link](https://github.com/cloudevents/spec) + +### Other Changes + +* The beta features for CAE, tracing, and fakes have been reinstated. + +## 1.7.0 (2023-07-12) + +### Features Added +* Added method `WithClientName()` to type `azcore.Client` to support shallow cloning of a client with a new name used for tracing. + +### Breaking Changes +> These changes affect only code written against beta versions v1.7.0-beta.1 or v1.7.0-beta.2 +* The beta features for CAE, tracing, and fakes have been omitted for this release. + +## 1.7.0-beta.2 (2023-06-06) + +### Breaking Changes +> These changes affect only code written against beta version v1.7.0-beta.1 +* Method `SpanFromContext()` on type `tracing.Tracer` had the `bool` return value removed. + * This includes the field `SpanFromContext` in supporting type `tracing.TracerOptions`. +* Method `AddError()` has been removed from type `tracing.Span`. +* Method `Span.End()` now requires an argument of type `*tracing.SpanEndOptions`. + +## 1.6.1 (2023-06-06) + +### Bugs Fixed +* Fixed an issue in `azcore.NewClient()` and `arm.NewClient()` that could cause an incorrect module name to be used in telemetry. + +### Other Changes +* This version contains all bug fixes from `v1.7.0-beta.1` + +## 1.7.0-beta.1 (2023-05-24) + +### Features Added +* Restored CAE support for ARM clients. +* Added supporting features to enable distributed tracing. + * Added func `runtime.StartSpan()` for use by SDKs to start spans. + * Added method `WithContext()` to `runtime.Request` to support shallow cloning with a new context. + * Added field `TracingNamespace` to `runtime.PipelineOptions`. + * Added field `Tracer` to `runtime.NewPollerOptions` and `runtime.NewPollerFromResumeTokenOptions` types. + * Added field `SpanFromContext` to `tracing.TracerOptions`. + * Added methods `Enabled()`, `SetAttributes()`, and `SpanFromContext()` to `tracing.Tracer`. + * Added supporting pipeline policies to include HTTP spans when creating clients. +* Added package `fake` to support generated fakes packages in SDKs. + * The package contains public surface area exposed by fake servers and supporting APIs intended only for use by the fake server implementations. + * Added an internal fake poller implementation. + +### Bugs Fixed +* Retry policy always clones the underlying `*http.Request` before invoking the next policy. +* Added some non-standard error codes to the list of error codes for unregistered resource providers. + +## 1.6.0 (2023-05-04) + +### Features Added +* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable. +* Added `TenantID` field to `policy.TokenRequestOptions`. + +## 1.5.0 (2023-04-06) + +### Features Added +* Added `ShouldRetry` to `policy.RetryOptions` for finer-grained control over when to retry. + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.5.0-beta.1 +> These features will return in v1.6.0-beta.1. +* Removed `TokenRequestOptions.Claims` and `.TenantID` +* Removed ARM client support for CAE and cross-tenant auth. + +### Bugs Fixed +* Added non-conformant LRO terminal states `Cancelled` and `Completed`. + +### Other Changes +* Updated to latest `internal` module. 
+ +## 1.5.0-beta.1 (2023-03-02) + +### Features Added +* This release includes the features added in v1.4.0-beta.1 + +## 1.4.0 (2023-03-02) +> This release doesn't include features added in v1.4.0-beta.1. They will return in v1.5.0-beta.1. + +### Features Added +* Add `Clone()` method for `arm/policy.ClientOptions`. + +### Bugs Fixed +* ARM's RP registration policy will no longer swallow unrecognized errors. +* Fixed an issue in `runtime.NewPollerFromResumeToken()` when resuming a `Poller` with a custom `PollingHandler`. +* Fixed wrong policy copy in `arm/runtime.NewPipeline()`. + +## 1.4.0-beta.1 (2023-02-02) + +### Features Added +* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable. +* Added `Claims` and `TenantID` fields to `policy.TokenRequestOptions`. +* ARM bearer token policy handles CAE challenges. + +## 1.3.1 (2023-02-02) + +### Other Changes +* Update dependencies to latest versions. + +## 1.3.0 (2023-01-06) + +### Features Added +* Added `BearerTokenOptions.AuthorizationHandler` to enable extending `runtime.BearerTokenPolicy` + with custom authorization logic +* Added `Client` types and matching constructors to the `azcore` and `arm` packages. These represent a basic client for HTTP and ARM respectively. + +### Other Changes +* Updated `internal` module to latest version. +* `policy/Request.SetBody()` allows replacing a request's body with an empty one + +## 1.2.0 (2022-11-04) + +### Features Added +* Added `ClientOptions.APIVersion` field, which overrides the default version a client + requests of the service, if the client supports this (all ARM clients do). +* Added package `tracing` that contains the building blocks for distributed tracing. +* Added field `TracingProvider` to type `policy.ClientOptions` that will be used to set the per-client tracing implementation. + +### Bugs Fixed +* Fixed an issue in `runtime.SetMultipartFormData` to properly handle slices of `io.ReadSeekCloser`. +* Fixed the MaxRetryDelay default to be 60s. +* Failure to poll the state of an LRO will now return an `*azcore.ResponseError` for poller types that require this behavior. +* Fixed a bug in `runtime.NewPipeline` that would cause pipeline-specified allowed headers and query parameters to be lost. + +### Other Changes +* Retain contents of read-only fields when sending requests. + +## 1.1.4 (2022-10-06) + +### Bugs Fixed +* Don't retry a request if the `Retry-After` delay is greater than the configured `RetryOptions.MaxRetryDelay`. +* `runtime.JoinPaths`: do not unconditionally add a forward slash before the query string + +### Other Changes +* Removed logging URL from retry policy as it's redundant. +* Retry policy logs when it exits due to a non-retriable status code. + +## 1.1.3 (2022-09-01) + +### Bugs Fixed +* Adjusted the initial retry delay to 800ms per the Azure SDK guidelines. + +## 1.1.2 (2022-08-09) + +### Other Changes +* Fixed various doc bugs. + +## 1.1.1 (2022-06-30) + +### Bugs Fixed +* Avoid polling when a RELO LRO synchronously terminates. + +## 1.1.0 (2022-06-03) + +### Other Changes +* The one-second floor for `Frequency` when calling `PollUntilDone()` has been removed when running tests. + +## 1.0.0 (2022-05-12) + +### Features Added +* Added interface `runtime.PollingHandler` to support custom poller implementations. + * Added field `PollingHandler` of this type to `runtime.NewPollerOptions[T]` and `runtime.NewPollerFromResumeTokenOptions[T]`. 
+ +### Breaking Changes +* Renamed `cloud.Configuration.LoginEndpoint` to `.ActiveDirectoryAuthorityHost` +* Renamed `cloud.AzurePublicCloud` to `cloud.AzurePublic` +* Removed `AuxiliaryTenants` field from `arm/ClientOptions` and `arm/policy/BearerTokenOptions` +* Removed `TokenRequestOptions.TenantID` +* `Poller[T].PollUntilDone()` now takes an `options *PollUntilDoneOptions` param instead of `freq time.Duration` +* Removed `arm/runtime.Poller[T]`, `arm/runtime.NewPoller[T]()` and `arm/runtime.NewPollerFromResumeToken[T]()` +* Removed `arm/runtime.FinalStateVia` and related `const` values +* Renamed `runtime.PageProcessor` to `runtime.PagingHandler` +* The `arm/runtime.ProviderRepsonse` and `arm/runtime.Provider` types are no longer exported. +* Renamed `NewRequestIdPolicy()` to `NewRequestIDPolicy()` +* `TokenCredential.GetToken` now returns `AccessToken` by value. + +### Bugs Fixed +* When per-try timeouts are enabled, only cancel the context after the body has been read and closed. +* The `Operation-Location` poller now properly handles `final-state-via` values. +* Improvements in `runtime.Poller[T]` + * `Poll()` shouldn't cache errors, allowing for additional retries when in a non-terminal state. + * `Result()` will cache the terminal result or error but not transient errors, allowing for additional retries. + +### Other Changes +* Updated to latest `internal` module and absorbed breaking changes. + * Use `temporal.Resource` and deleted copy. +* The internal poller implementation has been refactored. + * The implementation in `internal/pollers/poller.go` has been merged into `runtime/poller.go` with some slight modification. + * The internal poller types had their methods updated to conform to the `runtime.PollingHandler` interface. + * The creation of resume tokens has been refactored so that implementers of `runtime.PollingHandler` don't need to know about it. +* `NewPipeline()` places policies from `ClientOptions` after policies from `PipelineOptions` +* Default User-Agent headers no longer include `azcore` version information + +## 0.23.1 (2022-04-14) + +### Bugs Fixed +* Include XML header when marshalling XML content. +* Handle XML namespaces when searching for error code. +* Handle `odata.error` when searching for error code. + +## 0.23.0 (2022-04-04) + +### Features Added +* Added `runtime.Pager[T any]` and `runtime.Poller[T any]` supporting types for central, generic, implementations. +* Added `cloud` package with a new API for cloud configuration +* Added `FinalStateVia` field to `runtime.NewPollerOptions[T any]` type. + +### Breaking Changes +* Removed the `Poller` type-alias to the internal poller implementation. +* Added `Ptr[T any]` and `SliceOfPtrs[T any]` in the `to` package and removed all non-generic implementations. +* `NullValue` and `IsNullValue` now take a generic type parameter instead of an interface func parameter. +* Replaced `arm.Endpoint` with `cloud` API + * Removed the `endpoint` parameter from `NewRPRegistrationPolicy()` + * `arm/runtime.NewPipeline()` and `.NewRPRegistrationPolicy()` now return an `error` +* Refactored `NewPoller` and `NewPollerFromResumeToken` funcs in `arm/runtime` and `runtime` packages. + * Removed the `pollerID` parameter as it's no longer required. + * Created optional parameter structs and moved optional parameters into them. +* Changed `FinalStateVia` field to a `const` type. + +### Other Changes +* Converted expiring resource and dependent types to use generics. 
+ +## 0.22.0 (2022-03-03) + +### Features Added +* Added header `WWW-Authenticate` to the default allow-list of headers for logging. +* Added a pipeline policy that enables the retrieval of HTTP responses from API calls. + * Added `runtime.WithCaptureResponse` to enable the policy at the API level (off by default). + +### Breaking Changes +* Moved `WithHTTPHeader` and `WithRetryOptions` from the `policy` package to the `runtime` package. + +## 0.21.1 (2022-02-04) + +### Bugs Fixed +* Restore response body after reading in `Poller.FinalResponse()`. (#16911) +* Fixed bug in `NullValue` that could lead to incorrect comparisons for empty maps/slices (#16969) + +### Other Changes +* `BearerTokenPolicy` is more resilient to transient authentication failures. (#16789) + +## 0.21.0 (2022-01-11) + +### Features Added +* Added `AllowedHeaders` and `AllowedQueryParams` to `policy.LogOptions` to control which headers and query parameters are written to the logger. +* Added `azcore.ResponseError` type which is returned from APIs when a non-success HTTP status code is received. + +### Breaking Changes +* Moved `[]policy.Policy` parameters of `arm/runtime.NewPipeline` and `runtime.NewPipeline` into a new struct, `runtime.PipelineOptions` +* Renamed `arm/ClientOptions.Host` to `.Endpoint` +* Moved `Request.SkipBodyDownload` method to function `runtime.SkipBodyDownload` +* Removed `azcore.HTTPResponse` interface type +* `arm.NewPoller()` and `runtime.NewPoller()` no longer require an `eu` parameter +* `runtime.NewResponseError()` no longer requires an `error` parameter + +## 0.20.0 (2021-10-22) + +### Breaking Changes +* Removed `arm.Connection` +* Removed `azcore.Credential` and `.NewAnonymousCredential()` + * `NewRPRegistrationPolicy` now requires an `azcore.TokenCredential` +* `runtime.NewPipeline` has a new signature that simplifies implementing custom authentication +* `arm/runtime.RegistrationOptions` embeds `policy.ClientOptions` +* Contents in the `log` package have been slightly renamed. +* Removed `AuthenticationOptions` in favor of `policy.BearerTokenOptions` +* Changed parameters for `NewBearerTokenPolicy()` +* Moved policy config options out of `arm/runtime` and into `arm/policy` + +### Features Added +* Updating Documentation +* Added string typdef `arm.Endpoint` to provide a hint toward expected ARM client endpoints +* `azcore.ClientOptions` contains common pipeline configuration settings +* Added support for multi-tenant authorization in `arm/runtime` +* Require one second minimum when calling `PollUntilDone()` + +### Bug Fixes +* Fixed a potential panic when creating the default Transporter. +* Close LRO initial response body when creating a poller. +* Fixed a panic when recursively cloning structs that contain time.Time. + +## 0.19.0 (2021-08-25) + +### Breaking Changes +* Split content out of `azcore` into various packages. The intent is to separate content based on its usage (common, uncommon, SDK authors). + * `azcore` has all core functionality. + * `log` contains facilities for configuring in-box logging. + * `policy` is used for configuring pipeline options and creating custom pipeline policies. + * `runtime` contains various helpers used by SDK authors and generated content. + * `streaming` has helpers for streaming IO operations. +* `NewTelemetryPolicy()` now requires module and version parameters and the `Value` option has been removed. + * As a result, the `Request.Telemetry()` method has been removed. 
+* The telemetry policy now includes the SDK prefix `azsdk-go-` so callers no longer need to provide it. +* The `*http.Request` in `runtime.Request` is no longer anonymously embedded. Use the `Raw()` method to access it. +* The `UserAgent` and `Version` constants have been made internal, `Module` and `Version` respectively. + +### Bug Fixes +* Fixed an issue in the retry policy where the request body could be overwritten after a rewind. + +### Other Changes +* Moved modules `armcore` and `to` content into `arm` and `to` packages respectively. + * The `Pipeline()` method on `armcore.Connection` has been replaced by `NewPipeline()` in `arm.Connection`. It takes module and version parameters used by the telemetry policy. +* Poller logic has been consolidated across ARM and core implementations. + * This required some changes to the internal interfaces for core pollers. +* The core poller types have been improved, including more logging and test coverage. + +## 0.18.1 (2021-08-20) + +### Features Added +* Adds an `ETag` type for comparing etags and handling etags on requests +* Simplifies the `requestBodyProgess` and `responseBodyProgress` into a single `progress` object + +### Bugs Fixed +* `JoinPaths` will preserve query parameters encoded in the `root` url. + +### Other Changes +* Bumps dependency on `internal` module to the latest version (v0.7.0) + +## 0.18.0 (2021-07-29) +### Features Added +* Replaces methods from Logger type with two package methods for interacting with the logging functionality. +* `azcore.SetClassifications` replaces `azcore.Logger().SetClassifications` +* `azcore.SetListener` replaces `azcore.Logger().SetListener` + +### Breaking Changes +* Removes `Logger` type from `azcore` + + +## 0.17.0 (2021-07-27) +### Features Added +* Adding TenantID to TokenRequestOptions (https://github.com/Azure/azure-sdk-for-go/pull/14879) +* Adding AuxiliaryTenants to AuthenticationOptions (https://github.com/Azure/azure-sdk-for-go/pull/15123) + +### Breaking Changes +* Rename `AnonymousCredential` to `NewAnonymousCredential` (https://github.com/Azure/azure-sdk-for-go/pull/15104) +* rename `AuthenticationPolicyOptions` to `AuthenticationOptions` (https://github.com/Azure/azure-sdk-for-go/pull/15103) +* Make Header constants private (https://github.com/Azure/azure-sdk-for-go/pull/15038) + + +## 0.16.2 (2021-05-26) +### Features Added +* Improved support for byte arrays [#14715](https://github.com/Azure/azure-sdk-for-go/pull/14715) + + +## 0.16.1 (2021-05-19) +### Features Added +* Add license.txt to azcore module [#14682](https://github.com/Azure/azure-sdk-for-go/pull/14682) + + +## 0.16.0 (2021-05-07) +### Features Added +* Remove extra `*` in UnmarshalAsByteArray() [#14642](https://github.com/Azure/azure-sdk-for-go/pull/14642) + + +## 0.15.1 (2021-05-06) +### Features Added +* Cache the original request body on Request [#14634](https://github.com/Azure/azure-sdk-for-go/pull/14634) + + +## 0.15.0 (2021-05-05) +### Features Added +* Add support for null map and slice +* Export `Response.Payload` method + +### Breaking Changes +* remove `Response.UnmarshalError` as it's no longer required + + +## 0.14.5 (2021-04-23) +### Features Added +* Add `UnmarshalError()` on `azcore.Response` + + +## 0.14.4 (2021-04-22) +### Features Added +* Support for basic LRO polling +* Added type `LROPoller` and supporting types for basic polling on long running operations. +* rename poller param and added doc comment + +### Bugs Fixed +* Fixed content type detection bug in logging. 
+ + +## 0.14.3 (2021-03-29) +### Features Added +* Add support for multi-part form data +* Added method `WriteMultipartFormData()` to Request. + + +## 0.14.2 (2021-03-17) +### Features Added +* Add support for encoding JSON null values +* Adds `NullValue()` and `IsNullValue()` functions for setting and detecting sentinel values used for encoding a JSON null. +* Documentation fixes + +### Bugs Fixed +* Fixed improper error wrapping + + +## 0.14.1 (2021-02-08) +### Features Added +* Add `Pager` and `Poller` interfaces to azcore + + +## 0.14.0 (2021-01-12) +### Features Added +* Accept zero-value options for default values +* Specify zero-value options structs to accept default values. +* Remove `DefaultXxxOptions()` methods. +* Do not silently change TryTimeout on negative values +* make per-try timeout opt-in + + +## 0.13.4 (2020-11-20) +### Features Added +* Include telemetry string in User Agent + + +## 0.13.3 (2020-11-20) +### Features Added +* Updating response body handling on `azcore.Response` + + +## 0.13.2 (2020-11-13) +### Features Added +* Remove implementation of stateless policies as first-class functions. + + +## 0.13.1 (2020-11-05) +### Features Added +* Add `Telemetry()` method to `azcore.Request()` + + +## 0.13.0 (2020-10-14) +### Features Added +* Rename `log` to `logger` to avoid name collision with the log package. +* Documentation improvements +* Simplified `DefaultHTTPClientTransport()` implementation + + +## 0.12.1 (2020-10-13) +### Features Added +* Update `internal` module dependence to `v0.5.0` + + +## 0.12.0 (2020-10-08) +### Features Added +* Removed storage specific content +* Removed internal content to prevent API clutter +* Refactored various policy options to conform with our options pattern + + +## 0.11.0 (2020-09-22) +### Features Added + +* Removed `LogError` and `LogSlowResponse`. +* Renamed `options` in `RequestLogOptions`. +* Updated `NewRequestLogPolicy()` to follow standard pattern for options. +* Refactored `requestLogPolicy.Do()` per above changes. +* Cleaned up/added logging in retry policy. +* Export `NewResponseError()` +* Fix `RequestLogOptions` comment + + +## 0.10.1 (2020-09-17) +### Features Added +* Add default console logger +* Default console logger writes to stderr. To enable it, set env var `AZURE_SDK_GO_LOGGING` to the value 'all'. +* Added `Logger.Writef()` to reduce the need for `ShouldLog()` checks. +* Add `LogLongRunningOperation` + + +## 0.10.0 (2020-09-10) +### Features Added +* The `request` and `transport` interfaces have been refactored to align with the patterns in the standard library. +* `NewRequest()` now uses `http.NewRequestWithContext()` and performs additional validation, it also requires a context parameter. +* The `Policy` and `Transport` interfaces have had their context parameter removed as the context is associated with the underlying `http.Request`. +* `Pipeline.Do()` will validate the HTTP request before sending it through the pipeline, avoiding retries on a malformed request. +* The `Retrier` interface has been replaced with the `NonRetriableError` interface, and the retry policy updated to test for this. +* `Request.SetBody()` now requires a content type parameter for setting the request's MIME type. +* moved path concatenation into `JoinPaths()` func + + +## 0.9.6 (2020-08-18) +### Features Added +* Improvements to body download policy +* Always download the response body for error responses, i.e. HTTP status codes >= 400. 
+* Simplify variable declarations + + +## 0.9.5 (2020-08-11) +### Features Added +* Set the Content-Length header in `Request.SetBody` + + +## 0.9.4 (2020-08-03) +### Features Added +* Fix cancellation of per try timeout +* Per try timeout is used to ensure that an HTTP operation doesn't take too long, e.g. that a GET on some URL doesn't take an inordinant amount of time. +* Once the HTTP request returns, the per try timeout should be cancelled, not when the response has been read to completion. +* Do not drain response body if there are no more retries +* Do not retry non-idempotent operations when body download fails + + +## 0.9.3 (2020-07-28) +### Features Added +* Add support for custom HTTP request headers +* Inserts an internal policy into the pipeline that can extract HTTP header values from the caller's context, adding them to the request. +* Use `azcore.WithHTTPHeader` to add HTTP headers to a context. +* Remove method specific to Go 1.14 + + +## 0.9.2 (2020-07-28) +### Features Added +* Omit read-only content from request payloads +* If any field in a payload's object graph contains `azure:"ro"`, make a clone of the object graph, omitting all fields with this annotation. +* Verify no fields were dropped +* Handle embedded struct types +* Added test for cloning by value +* Add messages to failures + + +## 0.9.1 (2020-07-22) +### Features Added +* Updated dependency on internal module to fix race condition. + + +## 0.9.0 (2020-07-09) +### Features Added +* Add `HTTPResponse` interface to be used by callers to access the raw HTTP response from an error in the event of an API call failure. +* Updated `sdk/internal` dependency to latest version. +* Rename package alias + + +## 0.8.2 (2020-06-29) +### Features Added +* Added missing documentation comments + +### Bugs Fixed +* Fixed a bug in body download policy. + + +## 0.8.1 (2020-06-26) +### Features Added +* Miscellaneous clean-up reported by linters + + +## 0.8.0 (2020-06-01) +### Features Added +* Differentiate between standard and URL encoding. + + +## 0.7.1 (2020-05-27) +### Features Added +* Add support for for base64 encoding and decoding of payloads. + + +## 0.7.0 (2020-05-12) +### Features Added +* Change `RetryAfter()` to a function. + + +## 0.6.0 (2020-04-29) +### Features Added +* Updating `RetryAfter` to only return the detaion in the RetryAfter header + + +## 0.5.0 (2020-03-23) +### Features Added +* Export `TransportFunc` + +### Breaking Changes +* Removed `IterationDone` + + +## 0.4.1 (2020-02-25) +### Features Added +* Ensure per-try timeout is properly cancelled +* Explicitly call cancel the per-try timeout when the response body has been read/closed by the body download policy. +* When the response body is returned to the caller for reading/closing, wrap it in a `responseBodyReader` that will cancel the timeout when the body is closed. +* `Logger.Should()` will return false if no listener is set. + + +## 0.4.0 (2020-02-18) +### Features Added +* Enable custom `RetryOptions` to be specified per API call +* Added `WithRetryOptions()` that adds a custom `RetryOptions` to the provided context, allowing custom settings per API call. +* Remove 429 from the list of default HTTP status codes for retry. +* Change StatusCodesForRetry to a slice so consumers can append to it. +* Added support for retry-after in HTTP-date format. +* Cleaned up some comments specific to storage. 
+* Remove `Request.SetQueryParam()` +* Renamed `MaxTries` to `MaxRetries` + +## 0.3.0 (2020-01-16) +### Features Added +* Added `DefaultRetryOptions` to create initialized default options. + +### Breaking Changes +* Removed `Response.CheckStatusCode()` + + +## 0.2.0 (2020-01-15) +### Features Added +* Add support for marshalling and unmarshalling JSON +* Removed `Response.Payload` field +* Exit early when unmarsahlling if there is no payload + + +## 0.1.0 (2020-01-10) +### Features Added +* Initial release diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt new file mode 100644 index 0000000000..48ea6616b5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md new file mode 100644 index 0000000000..35a74e18d0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md @@ -0,0 +1,39 @@ +# Azure Core Client Module for Go + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azcore)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore) +[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/go%20-%20azcore%20-%20ci?branchName=main)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=1843&branchName=main) +[![Code Coverage](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main)](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main) + +The `azcore` module provides a set of common interfaces and types for Go SDK client modules. +These modules follow the [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html). + +## Getting started + +This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management. + +Typically, you will not need to explicitly install `azcore` as it will be installed as a client module dependency. +To add the latest version to your `go.mod` file, execute the following command. + +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/azcore +``` + +General documentation and examples can be found on [pkg.go.dev](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore). 
+ +## Contributing +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml new file mode 100644 index 0000000000..99348527b5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml @@ -0,0 +1,29 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azcore/ + - eng/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azcore/ + - eng/ + +extends: + template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go new file mode 100644 index 0000000000..9d077a3e12 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go @@ -0,0 +1,44 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package cloud + +var ( + // AzureChina contains configuration for Azure China. + AzureChina = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.chinacloudapi.cn/", Services: map[ServiceName]ServiceConfiguration{}, + } + // AzureGovernment contains configuration for Azure Government. + AzureGovernment = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.microsoftonline.us/", Services: map[ServiceName]ServiceConfiguration{}, + } + // AzurePublic contains configuration for Azure Public Cloud. + AzurePublic = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.microsoftonline.com/", Services: map[ServiceName]ServiceConfiguration{}, + } +) + +// ServiceName identifies a cloud service. +type ServiceName string + +// ResourceManager is a global constant identifying Azure Resource Manager. +const ResourceManager ServiceName = "resourceManager" + +// ServiceConfiguration configures a specific cloud service such as Azure Resource Manager. +type ServiceConfiguration struct { + // Audience is the audience the client will request for its access tokens. + Audience string + // Endpoint is the service's base URL. + Endpoint string +} + +// Configuration configures a cloud. +type Configuration struct { + // ActiveDirectoryAuthorityHost is the base URL of the cloud's Azure Active Directory. 
+ ActiveDirectoryAuthorityHost string + // Services contains configuration for the cloud's services. + Services map[ServiceName]ServiceConfiguration +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go new file mode 100644 index 0000000000..985b1bde2f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go @@ -0,0 +1,53 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +/* +Package cloud implements a configuration API for applications deployed to sovereign or private Azure clouds. + +Azure SDK client configuration defaults are appropriate for Azure Public Cloud (sometimes referred to as +"Azure Commercial" or simply "Microsoft Azure"). This package enables applications deployed to other +Azure Clouds to configure clients appropriately. + +This package contains predefined configuration for well-known sovereign clouds such as Azure Government and +Azure China. Azure SDK clients accept this configuration via the Cloud field of azcore.ClientOptions. For +example, configuring a credential and ARM client for Azure Government: + + opts := azcore.ClientOptions{Cloud: cloud.AzureGovernment} + cred, err := azidentity.NewDefaultAzureCredential( + &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}, + ) + handle(err) + + client, err := armsubscription.NewClient( + cred, &arm.ClientOptions{ClientOptions: opts}, + ) + handle(err) + +Applications deployed to a private cloud such as Azure Stack create a Configuration object with +appropriate values: + + c := cloud.Configuration{ + ActiveDirectoryAuthorityHost: "https://...", + Services: map[cloud.ServiceName]cloud.ServiceConfiguration{ + cloud.ResourceManager: { + Audience: "...", + Endpoint: "https://...", + }, + }, + } + opts := azcore.ClientOptions{Cloud: c} + + cred, err := azidentity.NewDefaultAzureCredential( + &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}, + ) + handle(err) + + client, err := armsubscription.NewClient( + cred, &arm.ClientOptions{ClientOptions: opts}, + ) + handle(err) +*/ +package cloud diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go new file mode 100644 index 0000000000..9d1c2f0c05 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go @@ -0,0 +1,173 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import ( + "reflect" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +// AccessToken represents an Azure service bearer access token with expiry information. +type AccessToken = exported.AccessToken + +// TokenCredential represents a credential capable of providing an OAuth token. +type TokenCredential = exported.TokenCredential + +// KeyCredential contains an authentication key used to authenticate to an Azure service. +type KeyCredential = exported.KeyCredential + +// NewKeyCredential creates a new instance of [KeyCredential] with the specified values. 
+// - key is the authentication key +func NewKeyCredential(key string) *KeyCredential { + return exported.NewKeyCredential(key) +} + +// SASCredential contains a shared access signature used to authenticate to an Azure service. +type SASCredential = exported.SASCredential + +// NewSASCredential creates a new instance of [SASCredential] with the specified values. +// - sas is the shared access signature +func NewSASCredential(sas string) *SASCredential { + return exported.NewSASCredential(sas) +} + +// holds sentinel values used to send nulls +var nullables map[reflect.Type]any = map[reflect.Type]any{} +var nullablesMu sync.RWMutex + +// NullValue is used to send an explicit 'null' within a request. +// This is typically used in JSON-MERGE-PATCH operations to delete a value. +func NullValue[T any]() T { + t := shared.TypeOfT[T]() + + nullablesMu.RLock() + v, found := nullables[t] + nullablesMu.RUnlock() + + if found { + // return the sentinel object + return v.(T) + } + + // promote to exclusive lock and check again (double-checked locking pattern) + nullablesMu.Lock() + defer nullablesMu.Unlock() + v, found = nullables[t] + + if !found { + var o reflect.Value + if k := t.Kind(); k == reflect.Map { + o = reflect.MakeMap(t) + } else if k == reflect.Slice { + // empty slices appear to all point to the same data block + // which causes comparisons to become ambiguous. so we create + // a slice with len/cap of one which ensures a unique address. + o = reflect.MakeSlice(t, 1, 1) + } else { + o = reflect.New(t.Elem()) + } + v = o.Interface() + nullables[t] = v + } + // return the sentinel object + return v.(T) +} + +// IsNullValue returns true if the field contains a null sentinel value. +// This is used by custom marshallers to properly encode a null value. +func IsNullValue[T any](v T) bool { + // see if our map has a sentinel object for this *T + t := reflect.TypeOf(v) + nullablesMu.RLock() + defer nullablesMu.RUnlock() + + if o, found := nullables[t]; found { + o1 := reflect.ValueOf(o) + v1 := reflect.ValueOf(v) + // we found it; return true if v points to the sentinel object. + // NOTE: maps and slices can only be compared to nil, else you get + // a runtime panic. so we compare addresses instead. + return o1.Pointer() == v1.Pointer() + } + // no sentinel object for this *t + return false +} + +// ClientOptions contains optional settings for a client's pipeline. +// Instances can be shared across calls to SDK client constructors when uniform configuration is desired. +// Zero-value fields will have their specified default values applied during use. +type ClientOptions = policy.ClientOptions + +// Client is a basic HTTP client. It consists of a pipeline and tracing provider. +type Client struct { + pl runtime.Pipeline + tr tracing.Tracer + + // cached on the client to support shallow copying with new values + tp tracing.Provider + modVer string + namespace string +} + +// NewClient creates a new Client instance with the provided values. +// - moduleName - the fully qualified name of the module where the client is defined; used by the telemetry policy and tracing provider. +// - moduleVersion - the semantic version of the module; used by the telemetry policy and tracing provider. 
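To make the sentinel mechanism above concrete, the sketch below shows a custom marshaller consulting IsNullValue to emit an explicit JSON null. The Widget type, its field names, and the use of encoding/json are illustrative assumptions, not part of azcore:

    type Widget struct {
        Count *int
    }

    func (w Widget) MarshalJSON() ([]byte, error) {
        m := map[string]any{}
        if azcore.IsNullValue(w.Count) {
            m["count"] = nil // encodes as an explicit JSON null
        } else if w.Count != nil {
            m["count"] = *w.Count
        }
        return json.Marshal(m)
    }

    // a caller requests deletion of Count on the service side:
    w := Widget{Count: azcore.NullValue[*int]()}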
+// - plOpts - pipeline configuration options; can be the zero-value +// - options - optional client configurations; pass nil to accept the default values +func NewClient(moduleName, moduleVersion string, plOpts runtime.PipelineOptions, options *ClientOptions) (*Client, error) { + if options == nil { + options = &ClientOptions{} + } + + if !options.Telemetry.Disabled { + if err := shared.ValidateModVer(moduleVersion); err != nil { + return nil, err + } + } + + pl := runtime.NewPipeline(moduleName, moduleVersion, plOpts, options) + + tr := options.TracingProvider.NewTracer(moduleName, moduleVersion) + if tr.Enabled() && plOpts.Tracing.Namespace != "" { + tr.SetAttributes(tracing.Attribute{Key: shared.TracingNamespaceAttrName, Value: plOpts.Tracing.Namespace}) + } + + return &Client{ + pl: pl, + tr: tr, + tp: options.TracingProvider, + modVer: moduleVersion, + namespace: plOpts.Tracing.Namespace, + }, nil +} + +// Pipeline returns the pipeline for this client. +func (c *Client) Pipeline() runtime.Pipeline { + return c.pl +} + +// Tracer returns the tracer for this client. +func (c *Client) Tracer() tracing.Tracer { + return c.tr +} + +// WithClientName returns a shallow copy of the Client with its tracing client name changed to clientName. +// Note that the values for module name and version will be preserved from the source Client. +// - clientName - the fully qualified name of the client ("package.Client"); this is used by the tracing provider when creating spans +func (c *Client) WithClientName(clientName string) *Client { + tr := c.tp.NewTracer(clientName, c.modVer) + if tr.Enabled() && c.namespace != "" { + tr.SetAttributes(tracing.Attribute{Key: shared.TracingNamespaceAttrName, Value: c.namespace}) + } + return &Client{pl: c.pl, tr: tr, tp: c.tp, modVer: c.modVer, namespace: c.namespace} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go new file mode 100644 index 0000000000..654a5f4043 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go @@ -0,0 +1,264 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +/* +Package azcore implements an HTTP request/response middleware pipeline used by Azure SDK clients. + +The middleware consists of three components. + + - One or more Policy instances. + - A Transporter instance. + - A Pipeline instance that combines the Policy and Transporter instances. + +# Implementing the Policy Interface + +A Policy can be implemented in two ways; as a first-class function for a stateless Policy, or as +a method on a type for a stateful Policy. Note that HTTP requests made via the same pipeline share +the same Policy instances, so if a Policy mutates its state it MUST be properly synchronized to +avoid race conditions. + +A Policy's Do method is called when an HTTP request wants to be sent over the network. The Do method can +perform any operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, +and/or query parameters, inject a failure, etc. Once the Policy has successfully completed its request +work, it must call the Next() method on the *policy.Request instance in order to pass the request to the +next Policy in the chain. + +When an HTTP response comes back, the Policy then gets a chance to process the response/error. 
The Policy instance +can log the response, retry the operation if it failed due to a transient error or timeout, unmarshal the response +body, etc. Once the Policy has successfully completed its response work, it must return the *http.Response +and error instances to its caller. + +Template for implementing a stateless Policy: + + type policyFunc func(*policy.Request) (*http.Response, error) + + // Do implements the Policy interface on policyFunc. + func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) { + return pf(req) + } + + func NewMyStatelessPolicy() policy.Policy { + return policyFunc(func(req *policy.Request) (*http.Response, error) { + // TODO: mutate/process Request here + + // forward Request to next Policy & get Response/error + resp, err := req.Next() + + // TODO: mutate/process Response/error here + + // return Response/error to previous Policy + return resp, err + }) + } + +Template for implementing a stateful Policy: + + type MyStatefulPolicy struct { + // TODO: add configuration/setting fields here + } + + // TODO: add initialization args to NewMyStatefulPolicy() + func NewMyStatefulPolicy() policy.Policy { + return &MyStatefulPolicy{ + // TODO: initialize configuration/setting fields here + } + } + + func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) { + // TODO: mutate/process Request here + + // forward Request to next Policy & get Response/error + resp, err := req.Next() + + // TODO: mutate/process Response/error here + + // return Response/error to previous Policy + return resp, err + } + +# Implementing the Transporter Interface + +The Transporter interface is responsible for sending the HTTP request and returning the corresponding +HTTP response or error. The Transporter is invoked by the last Policy in the chain. The default Transporter +implementation uses a shared http.Client from the standard library. + +The same stateful/stateless rules for Policy implementations apply to Transporter implementations. + +# Using Policy and Transporter Instances Via a Pipeline + +To use the Policy and Transporter instances, an application passes them to the runtime.NewPipeline function. + + func NewPipeline(transport Transporter, policies ...Policy) Pipeline + +The specified Policy instances form a chain and are invoked in the order provided to NewPipeline +followed by the Transporter. + +Once the Pipeline has been created, create a runtime.Request instance and pass it to Pipeline's Do method. + + func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) + + func (p Pipeline) Do(req *Request) (*http.Request, error) + +The Pipeline.Do method sends the specified Request through the chain of Policy and Transporter +instances. The response/error is then sent through the same chain of Policy instances in reverse +order. For example, assuming there are Policy types PolicyA, PolicyB, and PolicyC along with +TransportA. + + pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC) + +The flow of Request and Response looks like the following: + + policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+ + | + HTTP(S) endpoint + | + caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response-+ + +# Creating a Request Instance + +The Request instance passed to Pipeline's Do method is a wrapper around an *http.Request. It also +contains some internal state and provides various convenience methods. 
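As a rough end-to-end sketch of the pieces described above, the following constructs a Client, creates a Request, and sends it through the client's pipeline. The module name, version, and endpoint URL are placeholders, and error handling uses the same handle(err) shorthand as the other examples in this package documentation:

    c, err := azcore.NewClient("module/example", "v1.0.0", runtime.PipelineOptions{}, nil)
    handle(err)

    req, err := runtime.NewRequest(context.TODO(), http.MethodGet, "https://example.contoso.com/items")
    handle(err)

    resp, err := c.Pipeline().Do(req)
    handle(err)
    _ = resp // inspect the *http.Response as needed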
You create a Request instance +by calling the runtime.NewRequest function: + + func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) + +If the Request should contain a body, call the SetBody method. + + func (req *Request) SetBody(body ReadSeekCloser, contentType string) error + +A seekable stream is required so that upon retry, the retry Policy instance can seek the stream +back to the beginning before retrying the network request and re-uploading the body. + +# Sending an Explicit Null + +Operations like JSON-MERGE-PATCH send a JSON null to indicate a value should be deleted. + + { + "delete-me": null + } + +This requirement conflicts with the SDK's default marshalling that specifies "omitempty" as +a means to resolve the ambiguity between a field to be excluded and its zero-value. + + type Widget struct { + Name *string `json:",omitempty"` + Count *int `json:",omitempty"` + } + +In the above example, Name and Count are defined as pointer-to-type to disambiguate between +a missing value (nil) and a zero-value (0) which might have semantic differences. + +In a PATCH operation, any fields left as nil are to have their values preserved. When updating +a Widget's count, one simply specifies the new value for Count, leaving Name nil. + +To fulfill the requirement for sending a JSON null, the NullValue() function can be used. + + w := Widget{ + Count: azcore.NullValue[*int](), + } + +This sends an explict "null" for Count, indicating that any current value for Count should be deleted. + +# Processing the Response + +When the HTTP response is received, the *http.Response is returned directly. Each Policy instance +can inspect/mutate the *http.Response. + +# Built-in Logging + +To enable logging, set environment variable AZURE_SDK_GO_LOGGING to "all" before executing your program. + +By default the logger writes to stderr. This can be customized by calling log.SetListener, providing +a callback that writes to the desired location. Any custom logging implementation MUST provide its +own synchronization to handle concurrent invocations. + +See the docs for the log package for further details. + +# Pageable Operations + +Pageable operations return potentially large data sets spread over multiple GET requests. The result of +each GET is a "page" of data consisting of a slice of items. + +Pageable operations can be identified by their New*Pager naming convention and return type of *runtime.Pager[T]. + + func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse] + +The call to WidgetClient.NewListWidgetsPager() returns an instance of *runtime.Pager[T] for fetching pages +and determining if there are more pages to fetch. No IO calls are made until the NextPage() method is invoked. + + pager := widgetClient.NewListWidgetsPager(nil) + for pager.More() { + page, err := pager.NextPage(context.TODO()) + // handle err + for _, widget := range page.Values { + // process widget + } + } + +# Long-Running Operations + +Long-running operations (LROs) are operations consisting of an initial request to start the operation followed +by polling to determine when the operation has reached a terminal state. An LRO's terminal state is one +of the following values. + + - Succeeded - the LRO completed successfully + - Failed - the LRO failed to complete + - Canceled - the LRO was canceled + +LROs can be identified by their Begin* prefix and their return type of *runtime.Poller[T]. 
+ + func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error) + +When a call to WidgetClient.BeginCreateOrUpdate() returns a nil error, it means that the LRO has started. +It does _not_ mean that the widget has been created or updated (or failed to be created/updated). + +The *runtime.Poller[T] provides APIs for determining the state of the LRO. To wait for the LRO to complete, +call the PollUntilDone() method. + + poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil) + // handle err + result, err := poller.PollUntilDone(context.TODO(), nil) + // handle err + // use result + +The call to PollUntilDone() will block the current goroutine until the LRO has reached a terminal state or the +context is canceled/timed out. + +Note that LROs can take anywhere from several seconds to several minutes. The duration is operation-dependent. Due to +this variant behavior, pollers do _not_ have a preconfigured time-out. Use a context with the appropriate cancellation +mechanism as required. + +# Resume Tokens + +Pollers provide the ability to serialize their state into a "resume token" which can be used by another process to +recreate the poller. This is achieved via the runtime.Poller[T].ResumeToken() method. + + token, err := poller.ResumeToken() + // handle error + +Note that a token can only be obtained for a poller that's in a non-terminal state. Also note that any subsequent calls +to poller.Poll() might change the poller's state. In this case, a new token should be created. + +After the token has been obtained, it can be used to recreate an instance of the originating poller. + + poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{ + ResumeToken: token, + }) + +When resuming a poller, no IO is performed, and zero-value arguments can be used for everything but the Options.ResumeToken. + +Resume tokens are unique per service client and operation. Attempting to resume a poller for LRO BeginB() with a token from LRO +BeginA() will result in an error. + +# Fakes + +The fake package contains types used for constructing in-memory fake servers used in unit tests. +This allows writing tests to cover various success/error conditions without the need for connecting to a live service. + +Please see https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/samples/fakes for details and examples on how to use fakes. +*/ +package azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go new file mode 100644 index 0000000000..03cb227d0d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + +// ResponseError is returned when a request is made to a service and +// the service returns a non-success HTTP status code. +// Use errors.As() to access this type in the error chain. +// +// When marshaling instances, the RawResponse field will be omitted. +// However, the contents returned by Error() will be preserved. 
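For example, a caller can unwrap a failed operation's error (err below) to inspect the status and service error code; the field names used here are those defined on ResponseError:

    var respErr *azcore.ResponseError
    if errors.As(err, &respErr) {
        // the service returned a non-success status code
        fmt.Println("status:", respErr.StatusCode, "error code:", respErr.ErrorCode)
    }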
+type ResponseError = exported.ResponseError diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go new file mode 100644 index 0000000000..2b19d01f76 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go @@ -0,0 +1,57 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import ( + "strings" +) + +// ETag is a property used for optimistic concurrency during updates +// ETag is a validator based on https://tools.ietf.org/html/rfc7232#section-2.3.2 +// An ETag can be empty (""). +type ETag string + +// ETagAny is an ETag that represents everything, the value is "*" +const ETagAny ETag = "*" + +// Equals does a strong comparison of two ETags. Equals returns true when both +// ETags are not weak and the values of the underlying strings are equal. +func (e ETag) Equals(other ETag) bool { + return !e.IsWeak() && !other.IsWeak() && e == other +} + +// WeakEquals does a weak comparison of two ETags. Two ETags are equivalent if their opaque-tags match +// character-by-character, regardless of either or both being tagged as "weak". +func (e ETag) WeakEquals(other ETag) bool { + getStart := func(e1 ETag) int { + if e1.IsWeak() { + return 2 + } + return 0 + } + aStart := getStart(e) + bStart := getStart(other) + + aVal := e[aStart:] + bVal := other[bStart:] + + return aVal == bVal +} + +// IsWeak specifies whether the ETag is strong or weak. +func (e ETag) IsWeak() bool { + return len(e) >= 4 && strings.HasPrefix(string(e), "W/\"") && strings.HasSuffix(string(e), "\"") +} + +// MatchConditions specifies HTTP options for conditional requests. +type MatchConditions struct { + // Optionally limit requests to resources that have a matching ETag. + IfMatch *ETag + + // Optionally limit requests to resources that do not match the ETag. + IfNoneMatch *ETag +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go new file mode 100644 index 0000000000..f2b296b6dc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go @@ -0,0 +1,175 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "context" + "encoding/base64" + "fmt" + "io" + "net/http" + "sync/atomic" + "time" +) + +type nopCloser struct { + io.ReadSeeker +} + +func (n nopCloser) Close() error { + return nil +} + +// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. +// Exported as streaming.NopCloser(). +func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { + return nopCloser{rs} +} + +// HasStatusCode returns true if the Response's status code is one of the specified values. +// Exported as runtime.HasStatusCode(). +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + if resp == nil { + return false + } + for _, sc := range statusCodes { + if resp.StatusCode == sc { + return true + } + } + return false +} + +// AccessToken represents an Azure service bearer access token with expiry information. +// Exported as azcore.AccessToken. 
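A short sketch of the strong/weak ETag comparison rules defined above (the tag values are arbitrary):

    strong := azcore.ETag(`"33a64df5"`)
    weak := azcore.ETag(`W/"33a64df5"`)

    _ = strong.Equals(weak)     // false: a weak ETag never matches strongly
    _ = strong.WeakEquals(weak) // true: the opaque tags match character-by-character

    // conditional request options built from an ETag
    cond := azcore.MatchConditions{IfMatch: &strong}
    _ = cond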
+type AccessToken struct { + Token string + ExpiresOn time.Time +} + +// TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token. +// Exported as policy.TokenRequestOptions. +type TokenRequestOptions struct { + // Claims are any additional claims required for the token to satisfy a conditional access policy, such as a + // service may return in a claims challenge following an authorization failure. If a service returned the + // claims value base64 encoded, it must be decoded before setting this field. + Claims string + + // EnableCAE indicates whether to enable Continuous Access Evaluation (CAE) for the requested token. When true, + // azidentity credentials request CAE tokens for resource APIs supporting CAE. Clients are responsible for + // handling CAE challenges. If a client that doesn't handle CAE challenges receives a CAE token, it may end up + // in a loop retrying an API call with a token that has been revoked due to CAE. + EnableCAE bool + + // Scopes contains the list of permission scopes required for the token. + Scopes []string + + // TenantID identifies the tenant from which to request the token. azidentity credentials authenticate in + // their configured default tenants when this field isn't set. + TenantID string +} + +// TokenCredential represents a credential capable of providing an OAuth token. +// Exported as azcore.TokenCredential. +type TokenCredential interface { + // GetToken requests an access token for the specified set of scopes. + GetToken(ctx context.Context, options TokenRequestOptions) (AccessToken, error) +} + +// DecodeByteArray will base-64 decode the provided string into v. +// Exported as runtime.DecodeByteArray() +func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error { + if len(s) == 0 { + return nil + } + payload := string(s) + if payload[0] == '"' { + // remove surrounding quotes + payload = payload[1 : len(payload)-1] + } + switch format { + case Base64StdFormat: + decoded, err := base64.StdEncoding.DecodeString(payload) + if err == nil { + *v = decoded + return nil + } + return err + case Base64URLFormat: + // use raw encoding as URL format should not contain any '=' characters + decoded, err := base64.RawURLEncoding.DecodeString(payload) + if err == nil { + *v = decoded + return nil + } + return err + default: + return fmt.Errorf("unrecognized byte array format: %d", format) + } +} + +// KeyCredential contains an authentication key used to authenticate to an Azure service. +// Exported as azcore.KeyCredential. +type KeyCredential struct { + cred *keyCredential +} + +// NewKeyCredential creates a new instance of [KeyCredential] with the specified values. +// - key is the authentication key +func NewKeyCredential(key string) *KeyCredential { + return &KeyCredential{cred: newKeyCredential(key)} +} + +// Update replaces the existing key with the specified value. +func (k *KeyCredential) Update(key string) { + k.cred.Update(key) +} + +// SASCredential contains a shared access signature used to authenticate to an Azure service. +// Exported as azcore.SASCredential. +type SASCredential struct { + cred *keyCredential +} + +// NewSASCredential creates a new instance of [SASCredential] with the specified values. +// - sas is the shared access signature +func NewSASCredential(sas string) *SASCredential { + return &SASCredential{cred: newKeyCredential(sas)} +} + +// Update replaces the existing shared access signature with the specified value. 
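Any type with a matching GetToken method satisfies the TokenCredential interface exported by azcore. The static credential below is a sketch of the kind sometimes used in tests; it is illustrative and not part of the SDK:

    type staticTokenCredential struct {
        token string
    }

    func (c staticTokenCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
        // always return the same token, expiring an hour from now
        return azcore.AccessToken{Token: c.token, ExpiresOn: time.Now().Add(time.Hour)}, nil
    }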
+func (k *SASCredential) Update(sas string) { + k.cred.Update(sas) +} + +// KeyCredentialGet returns the key for cred. +func KeyCredentialGet(cred *KeyCredential) string { + return cred.cred.Get() +} + +// SASCredentialGet returns the shared access sig for cred. +func SASCredentialGet(cred *SASCredential) string { + return cred.cred.Get() +} + +type keyCredential struct { + key atomic.Value // string +} + +func newKeyCredential(key string) *keyCredential { + keyCred := keyCredential{} + keyCred.key.Store(key) + return &keyCred +} + +func (k *keyCredential) Get() string { + return k.key.Load().(string) +} + +func (k *keyCredential) Update(key string) { + k.key.Store(key) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go new file mode 100644 index 0000000000..e45f831ed2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go @@ -0,0 +1,77 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "errors" + "net/http" +) + +// Policy represents an extensibility point for the Pipeline that can mutate the specified +// Request and react to the received Response. +// Exported as policy.Policy. +type Policy interface { + // Do applies the policy to the specified Request. When implementing a Policy, mutate the + // request before calling req.Next() to move on to the next policy, and respond to the result + // before returning to the caller. + Do(req *Request) (*http.Response, error) +} + +// Pipeline represents a primitive for sending HTTP requests and receiving responses. +// Its behavior can be extended by specifying policies during construction. +// Exported as runtime.Pipeline. +type Pipeline struct { + policies []Policy +} + +// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses. +// Exported as policy.Transporter. +type Transporter interface { + // Do sends the HTTP request and returns the HTTP response or error. + Do(req *http.Request) (*http.Response, error) +} + +// used to adapt a TransportPolicy to a Policy +type transportPolicy struct { + trans Transporter +} + +func (tp transportPolicy) Do(req *Request) (*http.Response, error) { + if tp.trans == nil { + return nil, errors.New("missing transporter") + } + resp, err := tp.trans.Do(req.Raw()) + if err != nil { + return nil, err + } else if resp == nil { + // there was no response and no error (rare but can happen) + // this ensures the retry policy will retry the request + return nil, errors.New("received nil response") + } + return resp, nil +} + +// NewPipeline creates a new Pipeline object from the specified Policies. +// Not directly exported, but used as part of runtime.NewPipeline(). +func NewPipeline(transport Transporter, policies ...Policy) Pipeline { + // transport policy must always be the last in the slice + policies = append(policies, transportPolicy{trans: transport}) + return Pipeline{ + policies: policies, + } +} + +// Do is called for each and every HTTP request. It passes the request through all +// the Policy objects (which can transform the Request's URL/query parameters/headers) +// and ultimately sends the transformed HTTP request over the network. 
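Following the stateless-policy template from the package documentation, a minimal policy that stamps a custom header on every outgoing request might look like this; the policyFunc adapter, the constructor name, and the header name are illustrative:

    type policyFunc func(*policy.Request) (*http.Response, error)

    func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) {
        return pf(req)
    }

    func NewHeaderStampPolicy() policy.Policy {
        return policyFunc(func(req *policy.Request) (*http.Response, error) {
            req.Raw().Header.Set("x-example-header", "demo") // mutate the outgoing request
            return req.Next()                                // forward to the next policy
        })
    }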
+func (p Pipeline) Do(req *Request) (*http.Response, error) { + if req == nil { + return nil, errors.New("request cannot be nil") + } + req.policies = p.policies + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go new file mode 100644 index 0000000000..e3e2d4e588 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -0,0 +1,260 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "net/http" + "reflect" + "strconv" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// Base64Encoding is usesd to specify which base-64 encoder/decoder to use when +// encoding/decoding a slice of bytes to/from a string. +// Exported as runtime.Base64Encoding +type Base64Encoding int + +const ( + // Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads. + Base64StdFormat Base64Encoding = 0 + + // Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads. + Base64URLFormat Base64Encoding = 1 +) + +// EncodeByteArray will base-64 encode the byte slice v. +// Exported as runtime.EncodeByteArray() +func EncodeByteArray(v []byte, format Base64Encoding) string { + if format == Base64URLFormat { + return base64.RawURLEncoding.EncodeToString(v) + } + return base64.StdEncoding.EncodeToString(v) +} + +// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. +// Don't use this type directly, use NewRequest() instead. +// Exported as policy.Request. +type Request struct { + req *http.Request + body io.ReadSeekCloser + policies []Policy + values opValues +} + +type opValues map[reflect.Type]any + +// Set adds/changes a value +func (ov opValues) set(value any) { + ov[reflect.TypeOf(value)] = value +} + +// Get looks for a value set by SetValue first +func (ov opValues) get(value any) bool { + v, ok := ov[reflect.ValueOf(value).Elem().Type()] + if ok { + reflect.ValueOf(value).Elem().Set(reflect.ValueOf(v)) + } + return ok +} + +// NewRequestFromRequest creates a new policy.Request with an existing *http.Request +// Exported as runtime.NewRequestFromRequest(). +func NewRequestFromRequest(req *http.Request) (*Request, error) { + policyReq := &Request{req: req} + + if req.Body != nil { + // we can avoid a body copy here if the underlying stream is already a + // ReadSeekCloser. + readSeekCloser, isReadSeekCloser := req.Body.(io.ReadSeekCloser) + + if !isReadSeekCloser { + // since this is an already populated http.Request we want to copy + // over its body, if it has one. + bodyBytes, err := io.ReadAll(req.Body) + + if err != nil { + return nil, err + } + + if err := req.Body.Close(); err != nil { + return nil, err + } + + readSeekCloser = NopCloser(bytes.NewReader(bodyBytes)) + } + + // SetBody also takes care of updating the http.Request's body + // as well, so they should stay in-sync from this point. + if err := policyReq.SetBody(readSeekCloser, req.Header.Get("Content-Type")); err != nil { + return nil, err + } + } + + return policyReq, nil +} + +// NewRequest creates a new Request with the specified input. +// Exported as runtime.NewRequest(). 
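The byte-array helpers above are surfaced by the runtime package (per the "Exported as" notes); a round trip using the URL-safe format, assuming the runtime package re-exports the format constants under the same names, looks like:

    encoded := runtime.EncodeByteArray([]byte("hello"), runtime.Base64URLFormat)

    var decoded []byte
    if err := runtime.DecodeByteArray(encoded, &decoded, runtime.Base64URLFormat); err != nil {
        handle(err)
    }
    // decoded now equals []byte("hello")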
+func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) { + req, err := http.NewRequestWithContext(ctx, httpMethod, endpoint, nil) + if err != nil { + return nil, err + } + if req.URL.Host == "" { + return nil, errors.New("no Host in request URL") + } + if !(req.URL.Scheme == "http" || req.URL.Scheme == "https") { + return nil, fmt.Errorf("unsupported protocol scheme %s", req.URL.Scheme) + } + return &Request{req: req}, nil +} + +// Body returns the original body specified when the Request was created. +func (req *Request) Body() io.ReadSeekCloser { + return req.body +} + +// Raw returns the underlying HTTP request. +func (req *Request) Raw() *http.Request { + return req.req +} + +// Next calls the next policy in the pipeline. +// If there are no more policies, nil and an error are returned. +// This method is intended to be called from pipeline policies. +// To send a request through a pipeline call Pipeline.Do(). +func (req *Request) Next() (*http.Response, error) { + if len(req.policies) == 0 { + return nil, errors.New("no more policies") + } + nextPolicy := req.policies[0] + nextReq := *req + nextReq.policies = nextReq.policies[1:] + return nextPolicy.Do(&nextReq) +} + +// SetOperationValue adds/changes a mutable key/value associated with a single operation. +func (req *Request) SetOperationValue(value any) { + if req.values == nil { + req.values = opValues{} + } + req.values.set(value) +} + +// OperationValue looks for a value set by SetOperationValue(). +func (req *Request) OperationValue(value any) bool { + if req.values == nil { + return false + } + return req.values.get(value) +} + +// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length +// accordingly. If the ReadSeekCloser is nil or empty, Content-Length won't be set. If contentType is "", +// Content-Type won't be set, and if it was set, will be deleted. +// Use streaming.NopCloser to turn an io.ReadSeeker into an io.ReadSeekCloser. +func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error { + // clobber the existing Content-Type to preserve behavior + return SetBody(req, body, contentType, true) +} + +// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation. +func (req *Request) RewindBody() error { + if req.body != nil { + // Reset the stream back to the beginning and restore the body + _, err := req.body.Seek(0, io.SeekStart) + req.req.Body = req.body + return err + } + return nil +} + +// Close closes the request body. +func (req *Request) Close() error { + if req.body == nil { + return nil + } + return req.body.Close() +} + +// Clone returns a deep copy of the request with its context changed to ctx. +func (req *Request) Clone(ctx context.Context) *Request { + r2 := *req + r2.req = req.req.Clone(ctx) + return &r2 +} + +// WithContext returns a shallow copy of the request with its context changed to ctx. +func (req *Request) WithContext(ctx context.Context) *Request { + r2 := new(Request) + *r2 = *req + r2.req = r2.req.WithContext(ctx) + return r2 +} + +// not exported but dependent on Request + +// PolicyFunc is a type that implements the Policy interface. +// Use this type when implementing a stateless policy as a first-class function. +type PolicyFunc func(*Request) (*http.Response, error) + +// Do implements the Policy interface on policyFunc. 
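Putting NewRequest and SetBody together, a request with a JSON payload can be built as follows; the endpoint is a placeholder, and streaming.NopCloser (noted earlier as the exported form of NopCloser) adapts the bytes.Reader into the required ReadSeekCloser:

    req, err := runtime.NewRequest(context.TODO(), http.MethodPut, "https://example.contoso.com/items/1")
    handle(err)

    body := streaming.NopCloser(bytes.NewReader([]byte(`{"name":"widget"}`)))
    err = req.SetBody(body, "application/json")
    handle(err)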
+func (pf PolicyFunc) Do(req *Request) (*http.Response, error) { + return pf(req) +} + +// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length accordingly. +// - req is the request to modify +// - body is the request body; if nil or empty, Content-Length won't be set +// - contentType is the value for the Content-Type header; if empty, Content-Type will be deleted +// - clobberContentType when true, will overwrite the existing value of Content-Type with contentType +func SetBody(req *Request, body io.ReadSeekCloser, contentType string, clobberContentType bool) error { + var err error + var size int64 + if body != nil { + size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size + if err != nil { + return err + } + } + if size == 0 { + // treat an empty stream the same as a nil one: assign req a nil body + body = nil + // RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content + // (Del is a no-op when the header has no value) + req.req.Header.Del(shared.HeaderContentLength) + } else { + _, err = body.Seek(0, io.SeekStart) + if err != nil { + return err + } + req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10)) + req.Raw().GetBody = func() (io.ReadCloser, error) { + _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream + return body, err + } + } + // keep a copy of the body argument. this is to handle cases + // where req.Body is replaced, e.g. httputil.DumpRequest and friends. + req.body = body + req.req.Body = body + req.req.ContentLength = size + if contentType == "" { + // Del is a no-op when the header has no value + req.req.Header.Del(shared.HeaderContentType) + } else if req.req.Header.Get(shared.HeaderContentType) == "" || clobberContentType { + req.req.Header.Set(shared.HeaderContentType, contentType) + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go new file mode 100644 index 0000000000..8aec256bd0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go @@ -0,0 +1,201 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "regexp" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +// NewResponseError creates a new *ResponseError from the provided HTTP response. +// Exported as runtime.NewResponseError(). +func NewResponseError(resp *http.Response) error { + // prefer the error code in the response header + if ec := resp.Header.Get(shared.HeaderXMSErrorCode); ec != "" { + return NewResponseErrorWithErrorCode(resp, ec) + } + + // if we didn't get x-ms-error-code, check in the response body + body, err := exported.Payload(resp, nil) + if err != nil { + // since we're not returning the ResponseError in this + // case we also don't want to write it to the log. 
+ return err + } + + var errorCode string + if len(body) > 0 { + if fromJSON := extractErrorCodeJSON(body); fromJSON != "" { + errorCode = fromJSON + } else if fromXML := extractErrorCodeXML(body); fromXML != "" { + errorCode = fromXML + } + } + + return NewResponseErrorWithErrorCode(resp, errorCode) +} + +// NewResponseErrorWithErrorCode creates an *azcore.ResponseError from the provided HTTP response and errorCode. +// Exported as runtime.NewResponseErrorWithErrorCode(). +func NewResponseErrorWithErrorCode(resp *http.Response, errorCode string) error { + respErr := &ResponseError{ + ErrorCode: errorCode, + StatusCode: resp.StatusCode, + RawResponse: resp, + } + log.Write(log.EventResponseError, respErr.Error()) + return respErr +} + +func extractErrorCodeJSON(body []byte) string { + var rawObj map[string]any + if err := json.Unmarshal(body, &rawObj); err != nil { + // not a JSON object + return "" + } + + // check if this is a wrapped error, i.e. { "error": { ... } } + // if so then unwrap it + if wrapped, ok := rawObj["error"]; ok { + unwrapped, ok := wrapped.(map[string]any) + if !ok { + return "" + } + rawObj = unwrapped + } else if wrapped, ok := rawObj["odata.error"]; ok { + // check if this a wrapped odata error, i.e. { "odata.error": { ... } } + unwrapped, ok := wrapped.(map[string]any) + if !ok { + return "" + } + rawObj = unwrapped + } + + // now check for the error code + code, ok := rawObj["code"] + if !ok { + return "" + } + codeStr, ok := code.(string) + if !ok { + return "" + } + return codeStr +} + +func extractErrorCodeXML(body []byte) string { + // regular expression is much easier than dealing with the XML parser + rx := regexp.MustCompile(`<(?:\w+:)?[c|C]ode>\s*(\w+)\s*<\/(?:\w+:)?[c|C]ode>`) + res := rx.FindStringSubmatch(string(body)) + if len(res) != 2 { + return "" + } + // first submatch is the entire thing, second one is the captured error code + return res[1] +} + +// ResponseError is returned when a request is made to a service and +// the service returns a non-success HTTP status code. +// Use errors.As() to access this type in the error chain. +// Exported as azcore.ResponseError. +type ResponseError struct { + // ErrorCode is the error code returned by the resource provider if available. + ErrorCode string + + // StatusCode is the HTTP status code as defined in https://pkg.go.dev/net/http#pkg-constants. + StatusCode int + + // RawResponse is the underlying HTTP response. + RawResponse *http.Response `json:"-"` + + errMsg string +} + +// Error implements the error interface for type ResponseError. +// Note that the message contents are not contractual and can change over time. 
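On the producer side, clients typically translate non-success responses into a ResponseError using the runtime helpers (HasStatusCode and NewResponseError are both noted above as runtime exports). A sketch, reusing the client and req variables from the earlier examples:

    resp, err := client.Pipeline().Do(req)
    if err != nil {
        return err
    }
    if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) {
        // wraps the response, preferring the x-ms-error-code header for ErrorCode
        return runtime.NewResponseError(resp)
    }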
+func (e *ResponseError) Error() string { + if e.errMsg != "" { + return e.errMsg + } + + const separator = "--------------------------------------------------------------------------------" + // write the request method and URL with response status code + msg := &bytes.Buffer{} + if e.RawResponse != nil { + if e.RawResponse.Request != nil { + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + } else { + fmt.Fprintln(msg, "Request information not available") + } + fmt.Fprintln(msg, separator) + fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status) + } else { + fmt.Fprintln(msg, "Missing RawResponse") + fmt.Fprintln(msg, separator) + } + if e.ErrorCode != "" { + fmt.Fprintf(msg, "ERROR CODE: %s\n", e.ErrorCode) + } else { + fmt.Fprintln(msg, "ERROR CODE UNAVAILABLE") + } + if e.RawResponse != nil { + fmt.Fprintln(msg, separator) + body, err := exported.Payload(e.RawResponse, nil) + if err != nil { + // this really shouldn't fail at this point as the response + // body is already cached (it was read in NewResponseError) + fmt.Fprintf(msg, "Error reading response body: %v", err) + } else if len(body) > 0 { + if err := json.Indent(msg, body, "", " "); err != nil { + // failed to pretty-print so just dump it verbatim + fmt.Fprint(msg, string(body)) + } + // the standard library doesn't have a pretty-printer for XML + fmt.Fprintln(msg) + } else { + fmt.Fprintln(msg, "Response contained no body") + } + } + fmt.Fprintln(msg, separator) + + e.errMsg = msg.String() + return e.errMsg +} + +// internal type used for marshaling/unmarshaling +type responseError struct { + ErrorCode string `json:"errorCode"` + StatusCode int `json:"statusCode"` + ErrorMessage string `json:"errorMessage"` +} + +func (e ResponseError) MarshalJSON() ([]byte, error) { + return json.Marshal(responseError{ + ErrorCode: e.ErrorCode, + StatusCode: e.StatusCode, + ErrorMessage: e.Error(), + }) +} + +func (e *ResponseError) UnmarshalJSON(data []byte) error { + re := responseError{} + if err := json.Unmarshal(data, &re); err != nil { + return err + } + + e.ErrorCode = re.ErrorCode + e.StatusCode = re.StatusCode + e.errMsg = re.ErrorMessage + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go new file mode 100644 index 0000000000..6fc6d1400e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go @@ -0,0 +1,50 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// This is an internal helper package to combine the complete logging APIs. +package log + +import ( + azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +type Event = log.Event + +const ( + EventRequest = azlog.EventRequest + EventResponse = azlog.EventResponse + EventResponseError = azlog.EventResponseError + EventRetryPolicy = azlog.EventRetryPolicy + EventLRO = azlog.EventLRO +) + +// Write invokes the underlying listener with the specified event and message. +// If the event shouldn't be logged or there is no listener then Write does nothing. +func Write(cls log.Event, msg string) { + log.Write(cls, msg) +} + +// Writef invokes the underlying listener with the specified event and formatted message. 
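The public azcore/log package (imported here as azlog) is the surface applications use to capture this output; a minimal listener, assuming the SetListener and SetEvents functions exported by that package, might be:

    azlog.SetListener(func(event azlog.Event, msg string) {
        fmt.Println(msg) // route SDK log output to stdout
    })

    // only emit HTTP request/response events
    azlog.SetEvents(azlog.EventRequest, azlog.EventResponse)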
+// If the event shouldn't be logged or there is no listener then Writef does nothing. +func Writef(cls log.Event, format string, a ...any) { + log.Writef(cls, format, a...) +} + +// SetListener will set the Logger to write to the specified listener. +func SetListener(lst func(Event, string)) { + log.SetListener(lst) +} + +// Should returns true if the specified log event should be written to the log. +// By default all log events will be logged. Call SetEvents() to limit +// the log events for logging. +// If no listener has been set this will return false. +// Calling this method is useful when the message to log is computationally expensive +// and you want to avoid the overhead if its log event is not enabled. +func Should(cls log.Event) bool { + return log.Should(cls) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go new file mode 100644 index 0000000000..a534627605 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go @@ -0,0 +1,159 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package async + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/async-api-reference.md + +// Applicable returns true if the LRO is using Azure-AsyncOperation. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderAzureAsync) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]any) bool { + _, ok := token["asyncURL"] + return ok +} + +// Poller is an LRO poller that uses the Azure-AsyncOperation pattern. +type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The URL from Azure-AsyncOperation header. + AsyncURL string `json:"asyncURL"` + + // The URL from Location header. + LocURL string `json:"locURL"` + + // The URL from the initial LRO request. + OrigURL string `json:"origURL"` + + // The HTTP method from the initial LRO request. + Method string `json:"method"` + + // The value of final-state-via from swagger, can be the empty string. + FinalState pollers.FinalStateVia `json:"finalState"` + + // The LRO's current state. + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response and final-state type. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Azure-AsyncOperation poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Azure-AsyncOperation poller.") + asyncURL := resp.Header.Get(shared.HeaderAzureAsync) + if asyncURL == "" { + return nil, errors.New("response is missing Azure-AsyncOperation header") + } + if !poller.IsValidURL(asyncURL) { + return nil, fmt.Errorf("invalid polling URL %s", asyncURL) + } + // check for provisioning state. 
if the operation is a RELO + // and terminates synchronously this will prevent extra polling. + // it's ok if there's no provisioning state. + state, _ := poller.GetProvisioningState(resp) + if state == "" { + state = poller.StatusInProgress + } + p := &Poller[T]{ + pl: pl, + resp: resp, + AsyncURL: asyncURL, + LocURL: resp.Header.Get(shared.HeaderLocation), + OrigURL: resp.Request.URL.String(), + Method: resp.Request.Method, + FinalState: finalState, + CurState: state, + } + return p, nil +} + +// Done returns true if the LRO is in a terminal state. +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +// Poll retrieves the current state of the LRO. +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.AsyncURL, p.pl, func(resp *http.Response) (string, error) { + if !poller.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + state, err := poller.GetStatus(resp) + if err != nil { + return "", err + } else if state == "" { + return "", errors.New("the response did not contain a status") + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + if p.resp.StatusCode == http.StatusNoContent { + return nil + } else if poller.Failed(p.CurState) { + return exported.NewResponseError(p.resp) + } + var req *exported.Request + var err error + if p.Method == http.MethodPatch || p.Method == http.MethodPut { + // for PATCH and PUT, the final GET is on the original resource URL + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.Method == http.MethodPost { + if p.FinalState == pollers.FinalStateViaAzureAsyncOp { + // no final GET required + } else if p.FinalState == pollers.FinalStateViaOriginalURI { + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.LocURL != "" { + // ideally FinalState would be set to "location" but it isn't always. + // must check last due to more permissive condition. + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } + } + if err != nil { + return err + } + + // if a final GET request has been created, execute it + if req != nil { + resp, err := p.pl.Do(req) + if err != nil { + return err + } + p.resp = resp + } + + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go new file mode 100644 index 0000000000..8751b05147 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go @@ -0,0 +1,135 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package body + +import ( + "context" + "errors" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Kind is the identifier of this type in a resume token. +const kind = "body" + +// Applicable returns true if the LRO is using no headers, just provisioning state. +// This is only applicable to PATCH and PUT methods and assumes no polling headers. 
+func Applicable(resp *http.Response) bool { + // we can't check for absense of headers due to some misbehaving services + // like redis that return a Location header but don't actually use that protocol + return resp.Request.Method == http.MethodPatch || resp.Request.Method == http.MethodPut +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]any) bool { + t, ok := token["type"] + if !ok { + return false + } + tt, ok := t.(string) + if !ok { + return false + } + return tt == kind +} + +// Poller is an LRO poller that uses the Body pattern. +type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The poller's type, used for resume token processing. + Type string `json:"type"` + + // The URL for polling. + PollURL string `json:"pollURL"` + + // The LRO's current state. + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Body poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Body poller.") + p := &Poller[T]{ + pl: pl, + resp: resp, + Type: kind, + PollURL: resp.Request.URL.String(), + } + // default initial state to InProgress. depending on the HTTP + // status code and provisioning state, we might change the value. + curState := poller.StatusInProgress + provState, err := poller.GetProvisioningState(resp) + if err != nil && !errors.Is(err, poller.ErrNoBody) { + return nil, err + } + if resp.StatusCode == http.StatusCreated && provState != "" { + // absense of provisioning state is ok for a 201, means the operation is in progress + curState = provState + } else if resp.StatusCode == http.StatusOK { + if provState != "" { + curState = provState + } else if provState == "" { + // for a 200, absense of provisioning state indicates success + curState = poller.StatusSucceeded + } + } else if resp.StatusCode == http.StatusNoContent { + curState = poller.StatusSucceeded + } + p.CurState = curState + return p, nil +} + +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) { + if !poller.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + if resp.StatusCode == http.StatusNoContent { + p.resp = resp + p.CurState = poller.StatusSucceeded + return p.CurState, nil + } + state, err := poller.GetProvisioningState(resp) + if errors.Is(err, poller.ErrNoBody) { + // a missing response body in non-204 case is an error + return "", err + } else if state == "" { + // a response body without provisioning state is considered terminal success + state = poller.StatusSucceeded + } else if err != nil { + return "", err + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go new file mode 100644 index 0000000000..7f8d11b8ba --- 
/dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go @@ -0,0 +1,133 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package fake + +import ( + "context" + "errors" + "fmt" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Applicable returns true if the LRO is a fake. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderFakePollerStatus) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]any) bool { + _, ok := token["fakeURL"] + return ok +} + +// Poller is an LRO poller that uses the Core-Fake-Poller pattern. +type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The API name from CtxAPINameKey + APIName string `json:"apiName"` + + // The URL from Core-Fake-Poller header. + FakeURL string `json:"fakeURL"` + + // The LRO's current state. + FakeStatus string `json:"status"` +} + +// lroStatusURLSuffix is the URL path suffix for a faked LRO. +const lroStatusURLSuffix = "/get/fake/status" + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Core-Fake-Poller poller.") + return &Poller[T]{pl: pl}, nil + } + + log.Write(log.EventLRO, "Using Core-Fake-Poller poller.") + fakeStatus := resp.Header.Get(shared.HeaderFakePollerStatus) + if fakeStatus == "" { + return nil, errors.New("response is missing Fake-Poller-Status header") + } + + ctxVal := resp.Request.Context().Value(shared.CtxAPINameKey{}) + if ctxVal == nil { + return nil, errors.New("missing value for CtxAPINameKey") + } + + apiName, ok := ctxVal.(string) + if !ok { + return nil, fmt.Errorf("expected string for CtxAPINameKey, the type was %T", ctxVal) + } + + qp := "" + if resp.Request.URL.RawQuery != "" { + qp = "?" + resp.Request.URL.RawQuery + } + + p := &Poller[T]{ + pl: pl, + resp: resp, + APIName: apiName, + // NOTE: any changes to this path format MUST be reflected in SanitizePollerPath() + FakeURL: fmt.Sprintf("%s://%s%s%s%s", resp.Request.URL.Scheme, resp.Request.URL.Host, resp.Request.URL.Path, lroStatusURLSuffix, qp), + FakeStatus: fakeStatus, + } + return p, nil +} + +// Done returns true if the LRO is in a terminal state. +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.FakeStatus) +} + +// Poll retrieves the current state of the LRO. 
+func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + ctx = context.WithValue(ctx, shared.CtxAPINameKey{}, p.APIName) + err := pollers.PollHelper(ctx, p.FakeURL, p.pl, func(resp *http.Response) (string, error) { + if !poller.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + fakeStatus := resp.Header.Get(shared.HeaderFakePollerStatus) + if fakeStatus == "" { + return "", errors.New("response is missing Fake-Poller-Status header") + } + p.resp = resp + p.FakeStatus = fakeStatus + return p.FakeStatus, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + if p.resp.StatusCode == http.StatusNoContent { + return nil + } else if poller.Failed(p.FakeStatus) { + return exported.NewResponseError(p.resp) + } + + return pollers.ResultHelper(p.resp, poller.Failed(p.FakeStatus), "", out) +} + +// SanitizePollerPath removes any fake-appended suffix from a URL's path. +func SanitizePollerPath(path string) string { + return strings.TrimSuffix(path, lroStatusURLSuffix) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go new file mode 100644 index 0000000000..048285275d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go @@ -0,0 +1,123 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package loc + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Kind is the identifier of this type in a resume token. +const kind = "loc" + +// Applicable returns true if the LRO is using Location. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderLocation) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]any) bool { + t, ok := token["type"] + if !ok { + return false + } + tt, ok := t.(string) + if !ok { + return false + } + return tt == kind +} + +// Poller is an LRO poller that uses the Location pattern. +type Poller[T any] struct { + pl exported.Pipeline + resp *http.Response + + Type string `json:"type"` + PollURL string `json:"pollURL"` + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Location poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Location poller.") + locURL := resp.Header.Get(shared.HeaderLocation) + if locURL == "" { + return nil, errors.New("response is missing Location header") + } + if !poller.IsValidURL(locURL) { + return nil, fmt.Errorf("invalid polling URL %s", locURL) + } + // check for provisioning state. if the operation is a RELO + // and terminates synchronously this will prevent extra polling. + // it's ok if there's no provisioning state. 
+ state, _ := poller.GetProvisioningState(resp) + if state == "" { + state = poller.StatusInProgress + } + return &Poller[T]{ + pl: pl, + resp: resp, + Type: kind, + PollURL: locURL, + CurState: state, + }, nil +} + +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) { + // location polling can return an updated polling URL + if h := resp.Header.Get(shared.HeaderLocation); h != "" { + p.PollURL = h + } + // if provisioning state is available, use that. this is only + // for some ARM LRO scenarios (e.g. DELETE with a Location header) + // so if it's missing then use HTTP status code. + provState, _ := poller.GetProvisioningState(resp) + p.resp = resp + if provState != "" { + p.CurState = provState + } else if resp.StatusCode == http.StatusAccepted { + p.CurState = poller.StatusInProgress + } else if resp.StatusCode > 199 && resp.StatusCode < 300 { + // any 2xx other than a 202 indicates success + p.CurState = poller.StatusSucceeded + } else if pollers.IsNonTerminalHTTPStatusCode(resp) { + // the request timed out or is being throttled. + // DO NOT include this as a terminal failure. preserve + // the existing state and return the response. + } else { + p.CurState = poller.StatusFailed + } + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go new file mode 100644 index 0000000000..03699fd762 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -0,0 +1,150 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package op + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Applicable returns true if the LRO is using Operation-Location. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderOperationLocation) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]any) bool { + _, ok := token["oplocURL"] + return ok +} + +// Poller is an LRO poller that uses the Operation-Location pattern. +type Poller[T any] struct { + pl exported.Pipeline + resp *http.Response + + OpLocURL string `json:"oplocURL"` + LocURL string `json:"locURL"` + OrigURL string `json:"origURL"` + Method string `json:"method"` + FinalState pollers.FinalStateVia `json:"finalState"` + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. 
+func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Operation-Location poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Operation-Location poller.") + opURL := resp.Header.Get(shared.HeaderOperationLocation) + if opURL == "" { + return nil, errors.New("response is missing Operation-Location header") + } + if !poller.IsValidURL(opURL) { + return nil, fmt.Errorf("invalid Operation-Location URL %s", opURL) + } + locURL := resp.Header.Get(shared.HeaderLocation) + // Location header is optional + if locURL != "" && !poller.IsValidURL(locURL) { + return nil, fmt.Errorf("invalid Location URL %s", locURL) + } + // default initial state to InProgress. if the + // service sent us a status then use that instead. + curState := poller.StatusInProgress + status, err := poller.GetStatus(resp) + if err != nil && !errors.Is(err, poller.ErrNoBody) { + return nil, err + } + if status != "" { + curState = status + } + + return &Poller[T]{ + pl: pl, + resp: resp, + OpLocURL: opURL, + LocURL: locURL, + OrigURL: resp.Request.URL.String(), + Method: resp.Request.Method, + FinalState: finalState, + CurState: curState, + }, nil +} + +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.OpLocURL, p.pl, func(resp *http.Response) (string, error) { + if !poller.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + state, err := poller.GetStatus(resp) + if err != nil { + return "", err + } else if state == "" { + return "", errors.New("the response did not contain a status") + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + var req *exported.Request + var err error + + // when the payload is included with the status monitor on + // terminal success it's in the "result" JSON property + payloadPath := "result" + + if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } else if rl, rlErr := poller.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, poller.ErrNoBody) { + return rlErr + } else if rl != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, rl) + } else if p.Method == http.MethodPatch || p.Method == http.MethodPut { + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.Method == http.MethodPost && p.LocURL != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } + if err != nil { + return err + } + + // if a final GET request has been created, execute it + if req != nil { + // no JSON path when making a final GET request + payloadPath = "" + resp, err := p.pl.Do(req) + if err != nil { + return err + } + p.resp = resp + } + + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), payloadPath, out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go new file mode 100644 index 0000000000..37ed647f4e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go @@ -0,0 +1,24 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) 
Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package pollers + +// FinalStateVia is the enumerated type for the possible final-state-via values. +type FinalStateVia string + +const ( + // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL. + FinalStateViaAzureAsyncOp FinalStateVia = "azure-async-operation" + + // FinalStateViaLocation indicates the final payload comes from the Location URL. + FinalStateViaLocation FinalStateVia = "location" + + // FinalStateViaOriginalURI indicates the final payload comes from the original URL. + FinalStateViaOriginalURI FinalStateVia = "original-uri" + + // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL. + FinalStateViaOpLocation FinalStateVia = "operation-location" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go new file mode 100644 index 0000000000..6a7a32e034 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go @@ -0,0 +1,212 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package pollers + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "reflect" + + azexported "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// getTokenTypeName creates a type name from the type parameter T. +func getTokenTypeName[T any]() (string, error) { + tt := shared.TypeOfT[T]() + var n string + if tt.Kind() == reflect.Pointer { + n = "*" + tt = tt.Elem() + } + n += tt.Name() + if n == "" { + return "", errors.New("nameless types are not allowed") + } + return n, nil +} + +type resumeTokenWrapper[T any] struct { + Type string `json:"type"` + Token T `json:"token"` +} + +// NewResumeToken creates a resume token from the specified type. +// An error is returned if the generic type has no name (e.g. struct{}). +func NewResumeToken[TResult, TSource any](from TSource) (string, error) { + n, err := getTokenTypeName[TResult]() + if err != nil { + return "", err + } + b, err := json.Marshal(resumeTokenWrapper[TSource]{ + Type: n, + Token: from, + }) + if err != nil { + return "", err + } + return string(b), nil +} + +// ExtractToken returns the poller-specific token information from the provided token value. +func ExtractToken(token string) ([]byte, error) { + raw := map[string]json.RawMessage{} + if err := json.Unmarshal([]byte(token), &raw); err != nil { + return nil, err + } + // this is dependent on the type resumeTokenWrapper[T] + tk, ok := raw["token"] + if !ok { + return nil, errors.New("missing token value") + } + return tk, nil +} + +// IsTokenValid returns an error if the specified token isn't applicable for generic type T. 
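For illustration, a minimal standalone sketch of the resume-token round trip these helpers implement, using only encoding/json; the widgetPoller type and the type-name string are hypothetical stand-ins, not part of the vendored code.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // widgetPoller stands in for a concrete poller whose state gets tokenized.
    type widgetPoller struct {
        PollURL  string `json:"pollURL"`
        CurState string `json:"state"`
    }

    func main() {
        // wrap the poller state in a {"type": ..., "token": ...} envelope,
        // mirroring resumeTokenWrapper[T]
        wrapper := struct {
            Type  string       `json:"type"`
            Token widgetPoller `json:"token"`
        }{
            Type:  "*WidgetResponse",
            Token: widgetPoller{PollURL: "https://example.invalid/op/1", CurState: "InProgress"},
        }
        b, err := json.Marshal(wrapper)
        if err != nil {
            panic(err)
        }
        token := string(b)

        // later, pull out just the poller-specific portion, mirroring ExtractToken
        raw := map[string]json.RawMessage{}
        if err := json.Unmarshal([]byte(token), &raw); err != nil {
            panic(err)
        }
        var restored widgetPoller
        if err := json.Unmarshal(raw["token"], &restored); err != nil {
            panic(err)
        }
        fmt.Println(restored.PollURL, restored.CurState)
    }
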
+func IsTokenValid[T any](token string) error { + raw := map[string]any{} + if err := json.Unmarshal([]byte(token), &raw); err != nil { + return err + } + t, ok := raw["type"] + if !ok { + return errors.New("missing type value") + } + tt, ok := t.(string) + if !ok { + return fmt.Errorf("invalid type format %T", t) + } + n, err := getTokenTypeName[T]() + if err != nil { + return err + } + if tt != n { + return fmt.Errorf("cannot resume from this poller token. token is for type %s, not %s", tt, n) + } + return nil +} + +// used if the operation synchronously completed +type NopPoller[T any] struct { + resp *http.Response + result T +} + +// NewNopPoller creates a NopPoller from the provided response. +// It unmarshals the response body into an instance of T. +func NewNopPoller[T any](resp *http.Response) (*NopPoller[T], error) { + np := &NopPoller[T]{resp: resp} + if resp.StatusCode == http.StatusNoContent { + return np, nil + } + payload, err := exported.Payload(resp, nil) + if err != nil { + return nil, err + } + if len(payload) == 0 { + return np, nil + } + if err = json.Unmarshal(payload, &np.result); err != nil { + return nil, err + } + return np, nil +} + +func (*NopPoller[T]) Done() bool { + return true +} + +func (p *NopPoller[T]) Poll(context.Context) (*http.Response, error) { + return p.resp, nil +} + +func (p *NopPoller[T]) Result(ctx context.Context, out *T) error { + *out = p.result + return nil +} + +// PollHelper creates and executes the request, calling update() with the response. +// If the request fails, the update func is not called. +// The update func returns the state of the operation for logging purposes or an error +// if it fails to extract the required state from the response. +func PollHelper(ctx context.Context, endpoint string, pl azexported.Pipeline, update func(resp *http.Response) (string, error)) error { + req, err := azexported.NewRequest(ctx, http.MethodGet, endpoint) + if err != nil { + return err + } + resp, err := pl.Do(req) + if err != nil { + return err + } + state, err := update(resp) + if err != nil { + return err + } + log.Writef(log.EventLRO, "State %s", state) + return nil +} + +// ResultHelper processes the response as success or failure. +// In the success case, it unmarshals the payload into either a new instance of T or out. +// In the failure case, it creates an *azcore.Response error from the response. +func ResultHelper[T any](resp *http.Response, failed bool, jsonPath string, out *T) error { + // short-circuit the simple success case with no response body to unmarshal + if resp.StatusCode == http.StatusNoContent { + return nil + } + + defer resp.Body.Close() + if !poller.StatusCodeValid(resp) || failed { + // the LRO failed. unmarshal the error and update state + return azexported.NewResponseError(resp) + } + + // success case + payload, err := exported.Payload(resp, nil) + if err != nil { + return err + } + + if jsonPath != "" && len(payload) > 0 { + // extract the payload from the specified JSON path. + // do this before the zero-length check in case there + // is no payload. + jsonBody := map[string]json.RawMessage{} + if err = json.Unmarshal(payload, &jsonBody); err != nil { + return err + } + payload = jsonBody[jsonPath] + } + + if len(payload) == 0 { + return nil + } + + if err = json.Unmarshal(payload, out); err != nil { + return err + } + return nil +} + +// IsNonTerminalHTTPStatusCode returns true if the HTTP status code should be +// considered non-terminal thus eligible for retry. 
+func IsNonTerminalHTTPStatusCode(resp *http.Response) bool { + return exported.HasStatusCode(resp, + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + ) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go new file mode 100644 index 0000000000..9f53770e5b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -0,0 +1,44 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package shared + +const ( + ContentTypeAppJSON = "application/json" + ContentTypeAppXML = "application/xml" + ContentTypeTextPlain = "text/plain" +) + +const ( + HeaderAuthorization = "Authorization" + HeaderAuxiliaryAuthorization = "x-ms-authorization-auxiliary" + HeaderAzureAsync = "Azure-AsyncOperation" + HeaderContentLength = "Content-Length" + HeaderContentType = "Content-Type" + HeaderFakePollerStatus = "Fake-Poller-Status" + HeaderLocation = "Location" + HeaderOperationLocation = "Operation-Location" + HeaderRetryAfter = "Retry-After" + HeaderRetryAfterMS = "Retry-After-Ms" + HeaderUserAgent = "User-Agent" + HeaderWWWAuthenticate = "WWW-Authenticate" + HeaderXMSClientRequestID = "x-ms-client-request-id" + HeaderXMSRequestID = "x-ms-request-id" + HeaderXMSErrorCode = "x-ms-error-code" + HeaderXMSRetryAfterMS = "x-ms-retry-after-ms" +) + +const BearerTokenPrefix = "Bearer " + +const TracingNamespaceAttrName = "az.namespace" + +const ( + // Module is the name of the calling module used in telemetry data. + Module = "azcore" + + // Version is the semantic version (see http://semver.org) of this module. + Version = "v1.16.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go new file mode 100644 index 0000000000..d3da2c5fdf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go @@ -0,0 +1,149 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package shared + +import ( + "context" + "fmt" + "net/http" + "reflect" + "regexp" + "strconv" + "time" +) + +// NOTE: when adding a new context key type, it likely needs to be +// added to the deny-list of key types in ContextWithDeniedValues + +// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header. +type CtxWithHTTPHeaderKey struct{} + +// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions. +type CtxWithRetryOptionsKey struct{} + +// CtxWithCaptureResponse is used as a context key for retrieving the raw response. +type CtxWithCaptureResponse struct{} + +// CtxWithTracingTracer is used as a context key for adding/retrieving tracing.Tracer. +type CtxWithTracingTracer struct{} + +// CtxAPINameKey is used as a context key for adding/retrieving the API name. +type CtxAPINameKey struct{} + +// Delay waits for the duration to elapse or the context to be cancelled. 
+func Delay(ctx context.Context, delay time.Duration) error { + select { + case <-time.After(delay): + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// RetryAfter returns non-zero if the response contains one of the headers with a "retry after" value. +// Headers are checked in the following order: retry-after-ms, x-ms-retry-after-ms, retry-after +func RetryAfter(resp *http.Response) time.Duration { + if resp == nil { + return 0 + } + + type retryData struct { + header string + units time.Duration + + // custom is used when the regular algorithm failed and is optional. + // the returned duration is used verbatim (units is not applied). + custom func(string) time.Duration + } + + nop := func(string) time.Duration { return 0 } + + // the headers are listed in order of preference + retries := []retryData{ + { + header: HeaderRetryAfterMS, + units: time.Millisecond, + custom: nop, + }, + { + header: HeaderXMSRetryAfterMS, + units: time.Millisecond, + custom: nop, + }, + { + header: HeaderRetryAfter, + units: time.Second, + + // retry-after values are expressed in either number of + // seconds or an HTTP-date indicating when to try again + custom: func(ra string) time.Duration { + t, err := time.Parse(time.RFC1123, ra) + if err != nil { + return 0 + } + return time.Until(t) + }, + }, + } + + for _, retry := range retries { + v := resp.Header.Get(retry.header) + if v == "" { + continue + } + if retryAfter, _ := strconv.Atoi(v); retryAfter > 0 { + return time.Duration(retryAfter) * retry.units + } else if d := retry.custom(v); d > 0 { + return d + } + } + + return 0 +} + +// TypeOfT returns the type of the generic type param. +func TypeOfT[T any]() reflect.Type { + // you can't, at present, obtain the type of + // a type parameter, so this is the trick + return reflect.TypeOf((*T)(nil)).Elem() +} + +// TransportFunc is a helper to use a first-class func to satisfy the Transporter interface. +type TransportFunc func(*http.Request) (*http.Response, error) + +// Do implements the Transporter interface for the TransportFunc type. +func (pf TransportFunc) Do(req *http.Request) (*http.Response, error) { + return pf(req) +} + +// ValidateModVer verifies that moduleVersion is a valid semver 2.0 string. +func ValidateModVer(moduleVersion string) error { + modVerRegx := regexp.MustCompile(`^v\d+\.\d+\.\d+(?:-[a-zA-Z0-9_.-]+)?$`) + if !modVerRegx.MatchString(moduleVersion) { + return fmt.Errorf("malformed moduleVersion param value %s", moduleVersion) + } + return nil +} + +// ContextWithDeniedValues wraps an existing [context.Context], denying access to certain context values. +// Pipeline policies that create new requests to be sent down their own pipeline MUST wrap the caller's +// context with an instance of this type. This is to prevent context values from flowing across disjoint +// requests which can have unintended side-effects. +type ContextWithDeniedValues struct { + context.Context +} + +// Value implements part of the [context.Context] interface. +// It acts as a deny-list for certain context keys. 
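As a rough illustration of the Retry-After rule applied above (an integer number of seconds or an HTTP-date), here is a minimal standalone sketch using only the standard library; the helper name is an assumption, not the vendored API.

    package main

    import (
        "fmt"
        "net/http"
        "strconv"
        "time"
    )

    // retryAfterFromHeader mirrors the Retry-After rule: integer seconds first,
    // then an HTTP-date, otherwise zero.
    func retryAfterFromHeader(resp *http.Response) time.Duration {
        v := resp.Header.Get("Retry-After")
        if v == "" {
            return 0
        }
        if secs, err := strconv.Atoi(v); err == nil && secs > 0 {
            return time.Duration(secs) * time.Second
        }
        if t, err := time.Parse(time.RFC1123, v); err == nil {
            return time.Until(t)
        }
        return 0
    }

    func main() {
        resp := &http.Response{Header: http.Header{}}
        resp.Header.Set("Retry-After", "120")
        fmt.Println(retryAfterFromHeader(resp)) // 2m0s
    }
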
+func (c *ContextWithDeniedValues) Value(key any) any { + switch key.(type) { + case CtxAPINameKey, CtxWithCaptureResponse, CtxWithHTTPHeaderKey, CtxWithRetryOptionsKey, CtxWithTracingTracer: + return nil + default: + return c.Context.Value(key) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go new file mode 100644 index 0000000000..2f3901bff3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package log contains functionality for configuring logging behavior. +// Default logging to stderr can be enabled by setting environment variable AZURE_SDK_GO_LOGGING to "all". +package log diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go new file mode 100644 index 0000000000..f260dac363 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go @@ -0,0 +1,55 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Package log provides functionality for configuring logging facilities. +package log + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// Event is used to group entries. Each group can be toggled on or off. +type Event = log.Event + +const ( + // EventRequest entries contain information about HTTP requests. + // This includes information like the URL, query parameters, and headers. + EventRequest Event = "Request" + + // EventResponse entries contain information about HTTP responses. + // This includes information like the HTTP status code, headers, and request URL. + EventResponse Event = "Response" + + // EventResponseError entries contain information about HTTP responses that returned + // an *azcore.ResponseError (i.e. responses with a non 2xx HTTP status code). + // This includes the contents of ResponseError.Error(). + EventResponseError Event = "ResponseError" + + // EventRetryPolicy entries contain information specific to the retry policy in use. + EventRetryPolicy Event = "Retry" + + // EventLRO entries contain information specific to long-running operations. + // This includes information like polling location, operation state, and sleep intervals. + EventLRO Event = "LongRunningOperation" +) + +// SetEvents is used to control which events are written to +// the log. By default all log events are written. +// NOTE: this is not goroutine safe and should be called before using SDK clients. +func SetEvents(cls ...Event) { + log.SetEvents(cls...) +} + +// SetListener will set the Logger to write to the specified Listener. +// NOTE: this is not goroutine safe and should be called before using SDK clients. 
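A short, hedged usage sketch for the log package being vendored here: route SDK log output to a custom listener and limit it to LRO and retry events. It assumes only the public import path of this package and the SetListener/SetEvents functions shown in this file.

    package main

    import (
        "fmt"

        azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
    )

    func main() {
        // print SDK log messages instead of relying on AZURE_SDK_GO_LOGGING=all
        azlog.SetListener(func(ev azlog.Event, msg string) {
            fmt.Printf("[%s] %s\n", ev, msg)
        })
        // only emit long-running-operation and retry-policy entries
        azlog.SetEvents(azlog.EventLRO, azlog.EventRetryPolicy)
    }
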
+func SetListener(lst func(Event, string)) { + log.SetListener(lst) +} + +// for testing purposes +func resetEvents() { + log.TestResetEvents() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go new file mode 100644 index 0000000000..fad2579ed6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package policy contains the definitions needed for configuring in-box pipeline policies +// and creating custom policies. +package policy diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go new file mode 100644 index 0000000000..bb37a5efb4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -0,0 +1,198 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package policy + +import ( + "context" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +// Policy represents an extensibility point for the Pipeline that can mutate the specified +// Request and react to the received Response. +type Policy = exported.Policy + +// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses. +type Transporter = exported.Transporter + +// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. +// Don't use this type directly, use runtime.NewRequest() instead. +type Request = exported.Request + +// ClientOptions contains optional settings for a client's pipeline. +// Instances can be shared across calls to SDK client constructors when uniform configuration is desired. +// Zero-value fields will have their specified default values applied during use. +type ClientOptions struct { + // APIVersion overrides the default version requested of the service. + // Set with caution as this package version has not been tested with arbitrary service versions. + APIVersion string + + // Cloud specifies a cloud for the client. The default is Azure Public Cloud. + Cloud cloud.Configuration + + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the credential in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool + + // Logging configures the built-in logging policy. + Logging LogOptions + + // Retry configures the built-in retry policy. + Retry RetryOptions + + // Telemetry configures the built-in telemetry policy. + Telemetry TelemetryOptions + + // TracingProvider configures the tracing provider. + // It defaults to a no-op tracer. + TracingProvider tracing.Provider + + // Transport sets the transport for HTTP requests. + Transport Transporter + + // PerCallPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request. 
+ PerCallPolicies []Policy + + // PerRetryPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request, and for each retry of that request. + PerRetryPolicies []Policy +} + +// LogOptions configures the logging policy's behavior. +type LogOptions struct { + // IncludeBody indicates if request and response bodies should be included in logging. + // The default value is false. + // NOTE: enabling this can lead to disclosure of sensitive information, use with care. + IncludeBody bool + + // AllowedHeaders is the slice of headers to log with their values intact. + // All headers not in the slice will have their values REDACTED. + // Applies to request and response headers. + AllowedHeaders []string + + // AllowedQueryParams is the slice of query parameters to log with their values intact. + // All query parameters not in the slice will have their values REDACTED. + AllowedQueryParams []string +} + +// RetryOptions configures the retry policy's behavior. +// Zero-value fields will have their specified default values applied during use. +// This allows for modification of a subset of fields. +type RetryOptions struct { + // MaxRetries specifies the maximum number of attempts a failed operation will be retried + // before producing an error. + // The default value is three. A value less than zero means one try and no retries. + MaxRetries int32 + + // TryTimeout indicates the maximum time allowed for any single try of an HTTP request. + // This is disabled by default. Specify a value greater than zero to enable. + // NOTE: Setting this to a small value might cause premature HTTP request time-outs. + TryTimeout time.Duration + + // RetryDelay specifies the initial amount of delay to use before retrying an operation. + // The value is used only if the HTTP response does not contain a Retry-After header. + // The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay. + // The default value is four seconds. A value less than zero means no delay between retries. + RetryDelay time.Duration + + // MaxRetryDelay specifies the maximum delay allowed before retrying an operation. + // Typically the value is greater than or equal to the value specified in RetryDelay. + // The default Value is 60 seconds. A value less than zero means there is no cap. + MaxRetryDelay time.Duration + + // StatusCodes specifies the HTTP status codes that indicate the operation should be retried. + // A nil slice will use the following values. + // http.StatusRequestTimeout 408 + // http.StatusTooManyRequests 429 + // http.StatusInternalServerError 500 + // http.StatusBadGateway 502 + // http.StatusServiceUnavailable 503 + // http.StatusGatewayTimeout 504 + // Specifying values will replace the default values. + // Specifying an empty slice will disable retries for HTTP status codes. + StatusCodes []int + + // ShouldRetry evaluates if the retry policy should retry the request. + // When specified, the function overrides comparison against the list of + // HTTP status codes and error checking within the retry policy. Context + // and NonRetriable errors remain evaluated before calling ShouldRetry. + // The *http.Response and error parameters are mutually exclusive, i.e. + // if one is nil, the other is not nil. + // A return value of true means the retry policy should retry. + ShouldRetry func(*http.Response, error) bool +} + +// TelemetryOptions configures the telemetry policy's behavior. 
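A hedged sketch of how callers typically populate these options before handing them to an SDK client constructor; the specific header name and retry values are illustrative only.

    package main

    import (
        "fmt"
        "time"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
    )

    func main() {
        opts := policy.ClientOptions{
            Retry: policy.RetryOptions{
                MaxRetries:    2,                // default is three
                RetryDelay:    2 * time.Second,  // initial backoff when no Retry-After header is present
                MaxRetryDelay: 30 * time.Second, // cap on the exponential growth
            },
            Logging: policy.LogOptions{
                // log this header with its value intact; everything else is REDACTED
                AllowedHeaders: []string{"x-ms-correlation-request-id"},
            },
        }
        fmt.Println(opts.Retry.MaxRetries)
        // opts would then be embedded in a client-specific options struct and
        // passed to that client's constructor.
    }
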
+type TelemetryOptions struct { + // ApplicationID is an application-specific identification string to add to the User-Agent. + // It has a maximum length of 24 characters and must not contain any spaces. + ApplicationID string + + // Disabled will prevent the addition of any telemetry data to the User-Agent. + Disabled bool +} + +// TokenRequestOptions contains specific parameters that may be used by credential types when attempting to get a token. +type TokenRequestOptions = exported.TokenRequestOptions + +// BearerTokenOptions configures the bearer token policy's behavior. +type BearerTokenOptions struct { + // AuthorizationHandler allows SDK developers to run client-specific logic when BearerTokenPolicy must authorize a request. + // When this field isn't set, the policy follows its default behavior of authorizing every request with a bearer token from + // its given credential. + AuthorizationHandler AuthorizationHandler + + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the bearer token in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool +} + +// AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request. +type AuthorizationHandler struct { + // OnRequest provides TokenRequestOptions the policy can use to acquire a token for a request. The policy calls OnRequest + // whenever it needs a token and may call it multiple times for the same request. Its func parameter authorizes the request + // with a token from the policy's credential. Implementations that need to perform I/O should use the Request's context, + // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't send + // the request. When OnRequest is nil, the policy follows its default behavior, which is to authorize the request with a token + // from its credential according to its configuration. + OnRequest func(*Request, func(TokenRequestOptions) error) error + + // OnChallenge allows clients to implement custom HTTP authentication challenge handling. BearerTokenPolicy calls it upon + // receiving a 401 response containing multiple Bearer challenges or a challenge BearerTokenPolicy itself can't handle. + // OnChallenge is responsible for parsing challenge(s) (the Response's WWW-Authenticate header) and reauthorizing the + // Request accordingly. Its func argument authorizes the Request with a token from the policy's credential using the given + // TokenRequestOptions. OnChallenge should honor the Request's context, available from Request.Raw().Context(). When + // OnChallenge returns nil, the policy will send the Request again. + OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error +} + +// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context. +// The resp parameter will contain the HTTP response after the request has completed. +func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context { + return context.WithValue(parent, shared.CtxWithCaptureResponse{}, resp) +} + +// WithHTTPHeader adds the specified http.Header to the parent context. +// Use this to specify custom HTTP headers at the API-call level. +// Any overlapping headers will have their values replaced with the values specified here. 
+func WithHTTPHeader(parent context.Context, header http.Header) context.Context { + return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header) +} + +// WithRetryOptions adds the specified RetryOptions to the parent context. +// Use this to specify custom RetryOptions at the API-call level. +func WithRetryOptions(parent context.Context, options RetryOptions) context.Context { + return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go new file mode 100644 index 0000000000..c9cfa438cb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package runtime contains various facilities for creating requests and handling responses. +// The content is intended for SDK authors. +package runtime diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go new file mode 100644 index 0000000000..c0d56158e2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go @@ -0,0 +1,27 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" +) + +// NewResponseError creates an *azcore.ResponseError from the provided HTTP response. +// Call this when a service request returns a non-successful status code. +// The error code will be extracted from the *http.Response, either from the x-ms-error-code +// header (preferred) or attempted to be parsed from the response body. +func NewResponseError(resp *http.Response) error { + return exported.NewResponseError(resp) +} + +// NewResponseErrorWithErrorCode creates an *azcore.ResponseError from the provided HTTP response and errorCode. +// Use this variant when the error code is in a non-standard location. +func NewResponseErrorWithErrorCode(resp *http.Response, errorCode string) error { + return exported.NewResponseErrorWithErrorCode(resp, errorCode) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go new file mode 100644 index 0000000000..b960cff0b2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go @@ -0,0 +1,137 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "reflect" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +// PagingHandler contains the required data for constructing a Pager. +type PagingHandler[T any] struct { + // More returns a boolean indicating if there are more pages to fetch. + // It uses the provided page to make the determination. + More func(T) bool + + // Fetcher fetches the first and subsequent pages. 
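A hedged sketch of the per-call customization these two context helpers enable; the header name is an illustrative assumption.

    package main

    import (
        "context"
        "net/http"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
    )

    func main() {
        ctx := context.Background()
        // attach a custom header to every request sent with this context
        ctx = policy.WithHTTPHeader(ctx, http.Header{"X-Custom-Debug": []string{"on"}})
        // disable retries for this call only (a negative MaxRetries means one try)
        ctx = policy.WithRetryOptions(ctx, policy.RetryOptions{MaxRetries: -1})
        _ = ctx // pass ctx to any SDK client method
    }
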
+ Fetcher func(context.Context, *T) (T, error) + + // Tracer contains the Tracer from the client that's creating the Pager. + Tracer tracing.Tracer +} + +// Pager provides operations for iterating over paged responses. +type Pager[T any] struct { + current *T + handler PagingHandler[T] + tracer tracing.Tracer + firstPage bool +} + +// NewPager creates an instance of Pager using the specified PagingHandler. +// Pass a non-nil T for firstPage if the first page has already been retrieved. +func NewPager[T any](handler PagingHandler[T]) *Pager[T] { + return &Pager[T]{ + handler: handler, + tracer: handler.Tracer, + firstPage: true, + } +} + +// More returns true if there are more pages to retrieve. +func (p *Pager[T]) More() bool { + if p.current != nil { + return p.handler.More(*p.current) + } + return true +} + +// NextPage advances the pager to the next page. +func (p *Pager[T]) NextPage(ctx context.Context) (T, error) { + if p.current != nil { + if p.firstPage { + // we get here if it's an LRO-pager, we already have the first page + p.firstPage = false + return *p.current, nil + } else if !p.handler.More(*p.current) { + return *new(T), errors.New("no more pages") + } + } else { + // non-LRO case, first page + p.firstPage = false + } + + var err error + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.NextPage", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + + resp, err := p.handler.Fetcher(ctx, p.current) + if err != nil { + return *new(T), err + } + p.current = &resp + return *p.current, nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface for Pager[T]. +func (p *Pager[T]) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &p.current) +} + +// FetcherForNextLinkOptions contains the optional values for [FetcherForNextLink]. +type FetcherForNextLinkOptions struct { + // NextReq is the func to be called when requesting subsequent pages. + // Used for paged operations that have a custom next link operation. + NextReq func(context.Context, string) (*policy.Request, error) + + // StatusCodes contains additional HTTP status codes indicating success. + // The default value is http.StatusOK. + StatusCodes []int +} + +// FetcherForNextLink is a helper containing boilerplate code to simplify creating a PagingHandler[T].Fetcher from a next link URL. +// - ctx is the [context.Context] controlling the lifetime of the HTTP operation +// - pl is the [Pipeline] used to dispatch the HTTP request +// - nextLink is the URL used to fetch the next page. 
the empty string indicates the first page is to be requested +// - firstReq is the func to be called when creating the request for the first page +// - options contains any optional parameters, pass nil to accept the default values +func FetcherForNextLink(ctx context.Context, pl Pipeline, nextLink string, firstReq func(context.Context) (*policy.Request, error), options *FetcherForNextLinkOptions) (*http.Response, error) { + var req *policy.Request + var err error + if options == nil { + options = &FetcherForNextLinkOptions{} + } + if nextLink == "" { + req, err = firstReq(ctx) + } else if nextLink, err = EncodeQueryParams(nextLink); err == nil { + if options.NextReq != nil { + req, err = options.NextReq(ctx, nextLink) + } else { + req, err = NewRequest(ctx, http.MethodGet, nextLink) + } + } + if err != nil { + return nil, err + } + resp, err := pl.Do(req) + if err != nil { + return nil, err + } + successCodes := []int{http.StatusOK} + successCodes = append(successCodes, options.StatusCodes...) + if !HasStatusCode(resp, successCodes...) { + return nil, NewResponseError(resp) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go new file mode 100644 index 0000000000..6b1f5c083e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go @@ -0,0 +1,94 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// PipelineOptions contains Pipeline options for SDK developers +type PipelineOptions struct { + // AllowedHeaders is the slice of headers to log with their values intact. + // All headers not in the slice will have their values REDACTED. + // Applies to request and response headers. + AllowedHeaders []string + + // AllowedQueryParameters is the slice of query parameters to log with their values intact. + // All query parameters not in the slice will have their values REDACTED. + AllowedQueryParameters []string + + // APIVersion overrides the default version requested of the service. + // Set with caution as this package version has not been tested with arbitrary service versions. + APIVersion APIVersionOptions + + // PerCall contains custom policies to inject into the pipeline. + // Each policy is executed once per request. + PerCall []policy.Policy + + // PerRetry contains custom policies to inject into the pipeline. + // Each policy is executed once per request, and for each retry of that request. + PerRetry []policy.Policy + + // Tracing contains options used to configure distributed tracing. + Tracing TracingOptions +} + +// TracingOptions contains tracing options for SDK developers. +type TracingOptions struct { + // Namespace contains the value to use for the az.namespace span attribute. + Namespace string +} + +// Pipeline represents a primitive for sending HTTP requests and receiving responses. +// Its behavior can be extended by specifying policies during construction. +type Pipeline = exported.Pipeline + +// NewPipeline creates a pipeline from connection options, with any additional policies as specified. +// Policies from ClientOptions are placed after policies from PipelineOptions. +// The module and version parameters are used by the telemetry policy, when enabled. 
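A hedged sketch of the pager pattern these helpers support in generated clients. The WidgetsPage type, package name, and endpoint are hypothetical; NewRequest and UnmarshalAsJSON are other helpers from the same runtime package.

    package widgets

    import (
        "context"
        "net/http"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
        "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
    )

    // WidgetsPage is a hypothetical page shape with a standard nextLink field.
    type WidgetsPage struct {
        Value    []string `json:"value"`
        NextLink *string  `json:"nextLink"`
    }

    func newWidgetsPager(pl runtime.Pipeline, endpoint string) *runtime.Pager[WidgetsPage] {
        return runtime.NewPager(runtime.PagingHandler[WidgetsPage]{
            More: func(page WidgetsPage) bool {
                return page.NextLink != nil && *page.NextLink != ""
            },
            Fetcher: func(ctx context.Context, current *WidgetsPage) (WidgetsPage, error) {
                nextLink := ""
                if current != nil && current.NextLink != nil {
                    nextLink = *current.NextLink
                }
                resp, err := runtime.FetcherForNextLink(ctx, pl, nextLink,
                    func(ctx context.Context) (*policy.Request, error) {
                        return runtime.NewRequest(ctx, http.MethodGet, endpoint)
                    }, nil)
                if err != nil {
                    return WidgetsPage{}, err
                }
                var page WidgetsPage
                err = runtime.UnmarshalAsJSON(resp, &page)
                return page, err
            },
        })
    }

Callers would then loop with pager.More() and pager.NextPage(ctx).
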
+func NewPipeline(module, version string, plOpts PipelineOptions, options *policy.ClientOptions) Pipeline { + cp := policy.ClientOptions{} + if options != nil { + cp = *options + } + if len(plOpts.AllowedHeaders) > 0 { + headers := make([]string, len(plOpts.AllowedHeaders)+len(cp.Logging.AllowedHeaders)) + copy(headers, plOpts.AllowedHeaders) + headers = append(headers, cp.Logging.AllowedHeaders...) + cp.Logging.AllowedHeaders = headers + } + if len(plOpts.AllowedQueryParameters) > 0 { + qp := make([]string, len(plOpts.AllowedQueryParameters)+len(cp.Logging.AllowedQueryParams)) + copy(qp, plOpts.AllowedQueryParameters) + qp = append(qp, cp.Logging.AllowedQueryParams...) + cp.Logging.AllowedQueryParams = qp + } + // we put the includeResponsePolicy at the very beginning so that the raw response + // is populated with the final response (some policies might mutate the response) + policies := []policy.Policy{exported.PolicyFunc(includeResponsePolicy)} + if cp.APIVersion != "" { + policies = append(policies, newAPIVersionPolicy(cp.APIVersion, &plOpts.APIVersion)) + } + if !cp.Telemetry.Disabled { + policies = append(policies, NewTelemetryPolicy(module, version, &cp.Telemetry)) + } + policies = append(policies, plOpts.PerCall...) + policies = append(policies, cp.PerCallPolicies...) + policies = append(policies, NewRetryPolicy(&cp.Retry)) + policies = append(policies, plOpts.PerRetry...) + policies = append(policies, cp.PerRetryPolicies...) + policies = append(policies, exported.PolicyFunc(httpHeaderPolicy)) + policies = append(policies, newHTTPTracePolicy(cp.Logging.AllowedQueryParams)) + policies = append(policies, NewLogPolicy(&cp.Logging)) + policies = append(policies, exported.PolicyFunc(bodyDownloadPolicy)) + transport := cp.Transport + if transport == nil { + transport = defaultHTTPClient + } + return exported.NewPipeline(transport, policies...) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go new file mode 100644 index 0000000000..e5309aa6c1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// APIVersionOptions contains options for API versions +type APIVersionOptions struct { + // Location indicates where to set the version on a request, for example in a header or query param + Location APIVersionLocation + // Name is the name of the header or query parameter, for example "api-version" + Name string +} + +// APIVersionLocation indicates which part of a request identifies the service version +type APIVersionLocation int + +const ( + // APIVersionLocationQueryParam indicates a query parameter + APIVersionLocationQueryParam = 0 + // APIVersionLocationHeader indicates a header + APIVersionLocationHeader = 1 +) + +// newAPIVersionPolicy constructs an APIVersionPolicy. If version is "", Do will be a no-op. If version +// isn't empty and opts.Name is empty, Do will return an error. 
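A hedged sketch of the SDK-author side of NewPipeline as defined above; the module name, version, tracing namespace, and header name are illustrative.

    package widgets

    import (
        "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
        "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
    )

    func newWidgetsPipeline(clientOpts *policy.ClientOptions) runtime.Pipeline {
        plOpts := runtime.PipelineOptions{
            // log this service-specific header with its value intact
            AllowedHeaders: []string{"x-ms-widget-id"},
            Tracing: runtime.TracingOptions{
                Namespace: "Contoso.Widgets",
            },
        }
        return runtime.NewPipeline("widgets", "v0.1.0", plOpts, clientOpts)
    }
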
+func newAPIVersionPolicy(version string, opts *APIVersionOptions) *apiVersionPolicy { + if opts == nil { + opts = &APIVersionOptions{} + } + return &apiVersionPolicy{location: opts.Location, name: opts.Name, version: version} +} + +// apiVersionPolicy enables users to set the API version of every request a client sends. +type apiVersionPolicy struct { + // location indicates whether "name" refers to a query parameter or header. + location APIVersionLocation + + // name of the query param or header whose value should be overridden; provided by the client. + name string + + // version is the value (provided by the user) that replaces the default version value. + version string +} + +// Do sets the request's API version, if the policy is configured to do so, replacing any prior value. +func (a *apiVersionPolicy) Do(req *policy.Request) (*http.Response, error) { + if a.version != "" { + if a.name == "" { + // user set ClientOptions.APIVersion but the client ctor didn't set PipelineOptions.APIVersionOptions + return nil, errors.New("this client doesn't support overriding its API version") + } + switch a.location { + case APIVersionLocationHeader: + req.Raw().Header.Set(a.name, a.version) + case APIVersionLocationQueryParam: + q := req.Raw().URL.Query() + q.Set(a.name, a.version) + req.Raw().URL.RawQuery = q.Encode() + default: + return nil, fmt.Errorf("unknown APIVersionLocation %d", a.location) + } + } + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go new file mode 100644 index 0000000000..b26db920b0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -0,0 +1,236 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "encoding/base64" + "errors" + "net/http" + "regexp" + "strings" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal" +) + +// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential. +// It handles [Continuous Access Evaluation] (CAE) challenges. Clients needing to handle +// additional authentication challenges, or needing more control over authorization, should +// provide a [policy.AuthorizationHandler] in [policy.BearerTokenOptions]. 
+// +// [Continuous Access Evaluation]: https://learn.microsoft.com/entra/identity/conditional-access/concept-continuous-access-evaluation +type BearerTokenPolicy struct { + // mainResource is the resource to be retrieved using the tenant specified in the credential + mainResource *temporal.Resource[exported.AccessToken, acquiringResourceState] + // the following fields are read-only + authzHandler policy.AuthorizationHandler + cred exported.TokenCredential + scopes []string + allowHTTP bool +} + +type acquiringResourceState struct { + req *policy.Request + p *BearerTokenPolicy + tro policy.TokenRequestOptions +} + +// acquire acquires or updates the resource; only one +// thread/goroutine at a time ever calls this function +func acquire(state acquiringResourceState) (newResource exported.AccessToken, newExpiration time.Time, err error) { + tk, err := state.p.cred.GetToken(&shared.ContextWithDeniedValues{Context: state.req.Raw().Context()}, state.tro) + if err != nil { + return exported.AccessToken{}, time.Time{}, err + } + return tk, tk.ExpiresOn, nil +} + +// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens. +// cred: an azcore.TokenCredential implementation such as a credential object from azidentity +// scopes: the list of permission scopes required for the token. +// opts: optional settings. Pass nil to accept default values; this is the same as passing a zero-value options. +func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts *policy.BearerTokenOptions) *BearerTokenPolicy { + if opts == nil { + opts = &policy.BearerTokenOptions{} + } + ah := opts.AuthorizationHandler + if ah.OnRequest == nil { + // Set a default OnRequest that simply requests a token with the given scopes. OnChallenge + // doesn't get a default so the policy can use a nil check to determine whether the caller + // provided an implementation. + ah.OnRequest = func(_ *policy.Request, authNZ func(policy.TokenRequestOptions) error) error { + // authNZ sets EnableCAE: true in all cases, no need to duplicate that here + return authNZ(policy.TokenRequestOptions{Scopes: scopes}) + } + } + return &BearerTokenPolicy{ + authzHandler: ah, + cred: cred, + scopes: scopes, + mainResource: temporal.NewResource(acquire), + allowHTTP: opts.InsecureAllowCredentialWithHTTP, + } +} + +// authenticateAndAuthorize returns a function which authorizes req with a token from the policy's credential +func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(policy.TokenRequestOptions) error { + return func(tro policy.TokenRequestOptions) error { + tro.EnableCAE = true + as := acquiringResourceState{p: b, req: req, tro: tro} + tk, err := b.mainResource.Get(as) + if err != nil { + return err + } + req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+tk.Token) + return nil + } +} + +// Do authorizes a request with a bearer token +func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { + // skip adding the authorization header if no TokenCredential was provided. + // this prevents a panic that might be hard to diagnose and allows testing + // against http endpoints that don't require authentication. 
+ if b.cred == nil { + return req.Next() + } + + if err := checkHTTPSForAuth(req, b.allowHTTP); err != nil { + return nil, err + } + + err := b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req)) + if err != nil { + return nil, errorinfo.NonRetriableError(err) + } + + res, err := req.Next() + if err != nil { + return nil, err + } + + res, err = b.handleChallenge(req, res, false) + return res, err +} + +// handleChallenge handles authentication challenges either directly (for CAE challenges) or by calling +// the AuthorizationHandler. It's a no-op when the response doesn't include an authentication challenge. +// It will recurse at most once, to handle a CAE challenge following a non-CAE challenge handled by the +// AuthorizationHandler. +func (b *BearerTokenPolicy) handleChallenge(req *policy.Request, res *http.Response, recursed bool) (*http.Response, error) { + var err error + if res.StatusCode == http.StatusUnauthorized { + b.mainResource.Expire() + if res.Header.Get(shared.HeaderWWWAuthenticate) != "" { + caeChallenge, parseErr := parseCAEChallenge(res) + if parseErr != nil { + return res, parseErr + } + switch { + case caeChallenge != nil: + authNZ := func(tro policy.TokenRequestOptions) error { + // Take the TokenRequestOptions provided by OnRequest and add the challenge claims. The value + // will be empty at time of writing because CAE is the only feature involving claims. If in + // the future some client needs to specify unrelated claims, this function may need to merge + // them with the challenge claims. + tro.Claims = caeChallenge.params["claims"] + return b.authenticateAndAuthorize(req)(tro) + } + if err = b.authzHandler.OnRequest(req, authNZ); err == nil { + if err = req.RewindBody(); err == nil { + res, err = req.Next() + } + } + case b.authzHandler.OnChallenge != nil && !recursed: + if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil { + if err = req.RewindBody(); err == nil { + if res, err = req.Next(); err == nil { + res, err = b.handleChallenge(req, res, true) + } + } + } else { + // don't retry challenge handling errors + err = errorinfo.NonRetriableError(err) + } + default: + // return the response to the pipeline + } + } + } + return res, err +} + +func checkHTTPSForAuth(req *policy.Request, allowHTTP bool) error { + if strings.ToLower(req.Raw().URL.Scheme) != "https" && !allowHTTP { + return errorinfo.NonRetriableError(errors.New("authenticated requests are not permitted for non TLS protected (https) endpoints")) + } + return nil +} + +// parseCAEChallenge returns a *authChallenge representing Response's CAE challenge (nil when Response has none). +// If Response includes a CAE challenge having invalid claims, it returns a NonRetriableError. 
+func parseCAEChallenge(res *http.Response) (*authChallenge, error) { + var ( + caeChallenge *authChallenge + err error + ) + for _, c := range parseChallenges(res) { + if c.scheme == "Bearer" { + if claims := c.params["claims"]; claims != "" && c.params["error"] == "insufficient_claims" { + if b, de := base64.StdEncoding.DecodeString(claims); de == nil { + c.params["claims"] = string(b) + caeChallenge = &c + } else { + // don't include the decoding error because it's something + // unhelpful like "illegal base64 data at input byte 42" + err = errorinfo.NonRetriableError(errors.New("authentication challenge contains invalid claims: " + claims)) + } + break + } + } + } + return caeChallenge, err +} + +var ( + challenge, challengeParams *regexp.Regexp + once = &sync.Once{} +) + +type authChallenge struct { + scheme string + params map[string]string +} + +// parseChallenges assumes authentication challenges have quoted parameter values +func parseChallenges(res *http.Response) []authChallenge { + once.Do(func() { + // matches challenges having quoted parameters, capturing scheme and parameters + challenge = regexp.MustCompile(`(?:(\w+) ((?:\w+="[^"]*",?\s*)+))`) + // captures parameter names and values in a match of the above expression + challengeParams = regexp.MustCompile(`(\w+)="([^"]*)"`) + }) + parsed := []authChallenge{} + // WWW-Authenticate can have multiple values, each containing multiple challenges + for _, h := range res.Header.Values(shared.HeaderWWWAuthenticate) { + for _, sm := range challenge.FindAllStringSubmatch(h, -1) { + // sm is [challenge, scheme, params] (see regexp documentation on submatches) + c := authChallenge{ + params: make(map[string]string), + scheme: sm[1], + } + for _, sm := range challengeParams.FindAllStringSubmatch(sm[2], -1) { + // sm is [key="value", key, value] (see regexp documentation on submatches) + c.params[sm[1]] = sm[2] + } + parsed = append(parsed, c) + } + } + return parsed +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go new file mode 100644 index 0000000000..99dc029f0c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go @@ -0,0 +1,72 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "fmt" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" +) + +// bodyDownloadPolicy creates a policy object that downloads the response's body to a []byte. 
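// Illustrative sketch (not part of the vendored file): callers opt out of eager buffering per
// request with runtime.SkipBodyDownload, which sets the op-value consulted below, e.g.
//
//	req, err := runtime.NewRequest(ctx, http.MethodGet, "https://example.com/large-blob")
//	if err != nil {
//		// handle error
//	}
//	runtime.SkipBodyDownload(req) // stream resp.Body instead of reading it into memory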
+func bodyDownloadPolicy(req *policy.Request) (*http.Response, error) { + resp, err := req.Next() + if err != nil { + return resp, err + } + var opValues bodyDownloadPolicyOpValues + // don't skip downloading error response bodies + if req.OperationValue(&opValues); opValues.Skip && resp.StatusCode < 400 { + return resp, err + } + // Either bodyDownloadPolicyOpValues was not specified (so skip is false) + // or it was specified and skip is false: don't skip downloading the body + _, err = Payload(resp) + if err != nil { + return resp, newBodyDownloadError(err, req) + } + return resp, err +} + +// bodyDownloadPolicyOpValues is the struct containing the per-operation values +type bodyDownloadPolicyOpValues struct { + Skip bool +} + +type bodyDownloadError struct { + err error +} + +func newBodyDownloadError(err error, req *policy.Request) error { + // on failure, only retry the request for idempotent operations. + // we currently identify them as DELETE, GET, and PUT requests. + if m := strings.ToUpper(req.Raw().Method); m == http.MethodDelete || m == http.MethodGet || m == http.MethodPut { + // error is safe for retry + return err + } + // wrap error to avoid retries + return &bodyDownloadError{ + err: err, + } +} + +func (b *bodyDownloadError) Error() string { + return fmt.Sprintf("body download policy: %s", b.err.Error()) +} + +func (b *bodyDownloadError) NonRetriable() { + // marker method +} + +func (b *bodyDownloadError) Unwrap() error { + return b.err +} + +var _ errorinfo.NonRetriable = (*bodyDownloadError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go new file mode 100644 index 0000000000..c230af0afa --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go @@ -0,0 +1,40 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// newHTTPHeaderPolicy creates a policy object that adds custom HTTP headers to a request +func httpHeaderPolicy(req *policy.Request) (*http.Response, error) { + // check if any custom HTTP headers have been specified + if header := req.Raw().Context().Value(shared.CtxWithHTTPHeaderKey{}); header != nil { + for k, v := range header.(http.Header) { + // use Set to replace any existing value + // it also canonicalizes the header key + req.Raw().Header.Set(k, v[0]) + // add any remaining values + for i := 1; i < len(v); i++ { + req.Raw().Header.Add(k, v[i]) + } + } + } + return req.Next() +} + +// WithHTTPHeader adds the specified http.Header to the parent context. +// Use this to specify custom HTTP headers at the API-call level. +// Any overlapping headers will have their values replaced with the values specified here. +// Deprecated: use [policy.WithHTTPHeader] instead. 
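// Illustrative sketch (not part of the vendored file): custom per-call headers travel via the
// context and are applied by httpHeaderPolicy above, e.g.
//
//	hdr := http.Header{}
//	hdr.Set("x-example-header", "value") // placeholder header name
//	ctx := policy.WithHTTPHeader(context.Background(), hdr)
//	// requests created with ctx now carry the extra header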
+func WithHTTPHeader(parent context.Context, header http.Header) context.Context { + return policy.WithHTTPHeader(parent, header) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go new file mode 100644 index 0000000000..f375195c4b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go @@ -0,0 +1,154 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +const ( + attrHTTPMethod = "http.method" + attrHTTPURL = "http.url" + attrHTTPUserAgent = "http.user_agent" + attrHTTPStatusCode = "http.status_code" + + attrAZClientReqID = "az.client_request_id" + attrAZServiceReqID = "az.service_request_id" + + attrNetPeerName = "net.peer.name" +) + +// newHTTPTracePolicy creates a new instance of the httpTracePolicy. +// - allowedQueryParams contains the user-specified query parameters that don't need to be redacted from the trace +func newHTTPTracePolicy(allowedQueryParams []string) exported.Policy { + return &httpTracePolicy{allowedQP: getAllowedQueryParams(allowedQueryParams)} +} + +// httpTracePolicy is a policy that creates a trace for the HTTP request and its response +type httpTracePolicy struct { + allowedQP map[string]struct{} +} + +// Do implements the pipeline.Policy interfaces for the httpTracePolicy type. +func (h *httpTracePolicy) Do(req *policy.Request) (resp *http.Response, err error) { + rawTracer := req.Raw().Context().Value(shared.CtxWithTracingTracer{}) + if tracer, ok := rawTracer.(tracing.Tracer); ok && tracer.Enabled() { + attributes := []tracing.Attribute{ + {Key: attrHTTPMethod, Value: req.Raw().Method}, + {Key: attrHTTPURL, Value: getSanitizedURL(*req.Raw().URL, h.allowedQP)}, + {Key: attrNetPeerName, Value: req.Raw().URL.Host}, + } + + if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" { + attributes = append(attributes, tracing.Attribute{Key: attrHTTPUserAgent, Value: ua}) + } + if reqID := req.Raw().Header.Get(shared.HeaderXMSClientRequestID); reqID != "" { + attributes = append(attributes, tracing.Attribute{Key: attrAZClientReqID, Value: reqID}) + } + + ctx := req.Raw().Context() + ctx, span := tracer.Start(ctx, "HTTP "+req.Raw().Method, &tracing.SpanOptions{ + Kind: tracing.SpanKindClient, + Attributes: attributes, + }) + + defer func() { + if resp != nil { + span.SetAttributes(tracing.Attribute{Key: attrHTTPStatusCode, Value: resp.StatusCode}) + if resp.StatusCode > 399 { + span.SetStatus(tracing.SpanStatusError, resp.Status) + } + if reqID := resp.Header.Get(shared.HeaderXMSRequestID); reqID != "" { + span.SetAttributes(tracing.Attribute{Key: attrAZServiceReqID, Value: reqID}) + } + } else if err != nil { + var urlErr *url.Error + if errors.As(err, &urlErr) { + // calling *url.Error.Error() will include the unsanitized URL + // which we don't want. 
in addition, we already have the HTTP verb + // and sanitized URL in the trace so we aren't losing any info + err = urlErr.Err + } + span.SetStatus(tracing.SpanStatusError, err.Error()) + } + span.End() + }() + + req = req.WithContext(ctx) + } + resp, err = req.Next() + return +} + +// StartSpanOptions contains the optional values for StartSpan. +type StartSpanOptions struct { + // Kind indicates the kind of Span. + Kind tracing.SpanKind + // Attributes contains key-value pairs of attributes for the span. + Attributes []tracing.Attribute +} + +// StartSpan starts a new tracing span. +// You must call the returned func to terminate the span. Pass the applicable error +// if the span will exit with an error condition. +// - ctx is the parent context of the newly created context +// - name is the name of the span. this is typically the fully qualified name of an API ("Client.Method") +// - tracer is the client's Tracer for creating spans +// - options contains optional values. pass nil to accept any default values +func StartSpan(ctx context.Context, name string, tracer tracing.Tracer, options *StartSpanOptions) (context.Context, func(error)) { + if !tracer.Enabled() { + return ctx, func(err error) {} + } + + // we MUST propagate the active tracer before returning so that the trace policy can access it + ctx = context.WithValue(ctx, shared.CtxWithTracingTracer{}, tracer) + + if activeSpan := ctx.Value(ctxActiveSpan{}); activeSpan != nil { + // per the design guidelines, if a SDK method Foo() calls SDK method Bar(), + // then the span for Bar() must be suppressed. however, if Bar() makes a REST + // call, then Bar's HTTP span must be a child of Foo's span. + // however, there is an exception to this rule. if the SDK method Foo() is a + // messaging producer/consumer, and it takes a callback that's a SDK method + // Bar(), then the span for Bar() must _not_ be suppressed. + if kind := activeSpan.(tracing.SpanKind); kind == tracing.SpanKindClient || kind == tracing.SpanKindInternal { + return ctx, func(err error) {} + } + } + + if options == nil { + options = &StartSpanOptions{} + } + if options.Kind == 0 { + options.Kind = tracing.SpanKindInternal + } + + ctx, span := tracer.Start(ctx, name, &tracing.SpanOptions{ + Kind: options.Kind, + Attributes: options.Attributes, + }) + ctx = context.WithValue(ctx, ctxActiveSpan{}, options.Kind) + return ctx, func(err error) { + if err != nil { + errType := strings.Replace(fmt.Sprintf("%T", err), "*exported.", "*azcore.", 1) + span.SetStatus(tracing.SpanStatusError, fmt.Sprintf("%s:\n%s", errType, err.Error())) + } + span.End() + } +} + +// ctxActiveSpan is used as a context key for indicating a SDK client span is in progress. +type ctxActiveSpan struct{} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go new file mode 100644 index 0000000000..bb00f6c2fd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go @@ -0,0 +1,35 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "context" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// includeResponsePolicy creates a policy that retrieves the raw HTTP response upon request +func includeResponsePolicy(req *policy.Request) (*http.Response, error) { + resp, err := req.Next() + if resp == nil { + return resp, err + } + if httpOutRaw := req.Raw().Context().Value(shared.CtxWithCaptureResponse{}); httpOutRaw != nil { + httpOut := httpOutRaw.(**http.Response) + *httpOut = resp + } + return resp, err +} + +// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context. +// The resp parameter will contain the HTTP response after the request has completed. +// Deprecated: use [policy.WithCaptureResponse] instead. +func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context { + return policy.WithCaptureResponse(parent, resp) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go new file mode 100644 index 0000000000..eeb1c09cc1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// KeyCredentialPolicy authorizes requests with a [azcore.KeyCredential]. +type KeyCredentialPolicy struct { + cred *exported.KeyCredential + header string + prefix string + allowHTTP bool +} + +// KeyCredentialPolicyOptions contains the optional values configuring [KeyCredentialPolicy]. +type KeyCredentialPolicyOptions struct { + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the authentication key in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool + + // Prefix is used if the key requires a prefix before it's inserted into the HTTP request. + Prefix string +} + +// NewKeyCredentialPolicy creates a new instance of [KeyCredentialPolicy]. +// - cred is the [azcore.KeyCredential] used to authenticate with the service +// - header is the name of the HTTP request header in which the key is placed +// - options contains optional configuration, pass nil to accept the default values +func NewKeyCredentialPolicy(cred *exported.KeyCredential, header string, options *KeyCredentialPolicyOptions) *KeyCredentialPolicy { + if options == nil { + options = &KeyCredentialPolicyOptions{} + } + return &KeyCredentialPolicy{ + cred: cred, + header: header, + prefix: options.Prefix, + allowHTTP: options.InsecureAllowCredentialWithHTTP, + } +} + +// Do implementes the Do method on the [policy.Polilcy] interface. +func (k *KeyCredentialPolicy) Do(req *policy.Request) (*http.Response, error) { + // skip adding the authorization header if no KeyCredential was provided. + // this prevents a panic that might be hard to diagnose and allows testing + // against http endpoints that don't require authentication. 
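// Illustrative sketch (not part of the vendored file): construction pairs an azcore.KeyCredential
// with the service-specific header name, e.g.
//
//	cred := azcore.NewKeyCredential("<api-key>") // placeholder key
//	p := runtime.NewKeyCredentialPolicy(cred, "Ocp-Apim-Subscription-Key", nil) // header name varies by service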
+ if k.cred != nil { + if err := checkHTTPSForAuth(req, k.allowHTTP); err != nil { + return nil, err + } + val := exported.KeyCredentialGet(k.cred) + if k.prefix != "" { + val = k.prefix + val + } + req.Raw().Header.Add(k.header, val) + } + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go new file mode 100644 index 0000000000..f048d7fb53 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go @@ -0,0 +1,264 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/diag" +) + +type logPolicy struct { + includeBody bool + allowedHeaders map[string]struct{} + allowedQP map[string]struct{} +} + +// NewLogPolicy creates a request/response logging policy object configured using the specified options. +// Pass nil to accept the default values; this is the same as passing a zero-value options. +func NewLogPolicy(o *policy.LogOptions) policy.Policy { + if o == nil { + o = &policy.LogOptions{} + } + // construct default hash set of allowed headers + allowedHeaders := map[string]struct{}{ + "accept": {}, + "cache-control": {}, + "connection": {}, + "content-length": {}, + "content-type": {}, + "date": {}, + "etag": {}, + "expires": {}, + "if-match": {}, + "if-modified-since": {}, + "if-none-match": {}, + "if-unmodified-since": {}, + "last-modified": {}, + "ms-cv": {}, + "pragma": {}, + "request-id": {}, + "retry-after": {}, + "server": {}, + "traceparent": {}, + "transfer-encoding": {}, + "user-agent": {}, + "www-authenticate": {}, + "x-ms-request-id": {}, + "x-ms-client-request-id": {}, + "x-ms-return-client-request-id": {}, + } + // add any caller-specified allowed headers to the set + for _, ah := range o.AllowedHeaders { + allowedHeaders[strings.ToLower(ah)] = struct{}{} + } + // now do the same thing for query params + allowedQP := getAllowedQueryParams(o.AllowedQueryParams) + return &logPolicy{ + includeBody: o.IncludeBody, + allowedHeaders: allowedHeaders, + allowedQP: allowedQP, + } +} + +// getAllowedQueryParams merges the default set of allowed query parameters +// with a custom set (usually comes from client options). +func getAllowedQueryParams(customAllowedQP []string) map[string]struct{} { + allowedQP := map[string]struct{}{ + "api-version": {}, + } + for _, qp := range customAllowedQP { + allowedQP[strings.ToLower(qp)] = struct{}{} + } + return allowedQP +} + +// logPolicyOpValues is the struct containing the per-operation values +type logPolicyOpValues struct { + try int32 + start time.Time +} + +func (p *logPolicy) Do(req *policy.Request) (*http.Response, error) { + // Get the per-operation values. These are saved in the Message's map so that they persist across each retry calling into this policy object. 
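// Illustrative sketch (not part of the vendored file): nothing is emitted by this policy unless a
// listener is registered with the azcore log package ("github.com/Azure/azure-sdk-for-go/sdk/azcore/log"), e.g.
//
//	azlog.SetListener(func(cls azlog.Event, msg string) { fmt.Println(cls, msg) })
//	azlog.SetEvents(azlog.EventRequest, azlog.EventResponse)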
+ var opValues logPolicyOpValues + if req.OperationValue(&opValues); opValues.start.IsZero() { + opValues.start = time.Now() // If this is the 1st try, record this operation's start time + } + opValues.try++ // The first try is #1 (not #0) + req.SetOperationValue(opValues) + + // Log the outgoing request as informational + if log.Should(log.EventRequest) { + b := &bytes.Buffer{} + fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", opValues.try) + p.writeRequestWithResponse(b, req, nil, nil) + var err error + if p.includeBody { + err = writeReqBody(req, b) + } + log.Write(log.EventRequest, b.String()) + if err != nil { + return nil, err + } + } + + // Set the time for this particular retry operation and then Do the operation. + tryStart := time.Now() + response, err := req.Next() // Make the request + tryEnd := time.Now() + tryDuration := tryEnd.Sub(tryStart) + opDuration := tryEnd.Sub(opValues.start) + + if log.Should(log.EventResponse) { + // We're going to log this; build the string to log + b := &bytes.Buffer{} + fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v, OpTime=%v) -- ", opValues.try, tryDuration, opDuration) + if err != nil { // This HTTP request did not get a response from the service + fmt.Fprint(b, "REQUEST ERROR\n") + } else { + fmt.Fprint(b, "RESPONSE RECEIVED\n") + } + + p.writeRequestWithResponse(b, req, response, err) + if err != nil { + // skip frames runtime.Callers() and runtime.StackTrace() + b.WriteString(diag.StackTrace(2, 32)) + } else if p.includeBody { + err = writeRespBody(response, b) + } + log.Write(log.EventResponse, b.String()) + } + return response, err +} + +const redactedValue = "REDACTED" + +// getSanitizedURL returns a sanitized string for the provided url.URL +func getSanitizedURL(u url.URL, allowedQueryParams map[string]struct{}) string { + // redact applicable query params + qp := u.Query() + for k := range qp { + if _, ok := allowedQueryParams[strings.ToLower(k)]; !ok { + qp.Set(k, redactedValue) + } + } + u.RawQuery = qp.Encode() + return u.String() +} + +// writeRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are +// not nil, then these are also written into the Buffer. +func (p *logPolicy) writeRequestWithResponse(b *bytes.Buffer, req *policy.Request, resp *http.Response, err error) { + // Write the request into the buffer. + fmt.Fprint(b, " "+req.Raw().Method+" "+getSanitizedURL(*req.Raw().URL, p.allowedQP)+"\n") + p.writeHeader(b, req.Raw().Header) + if resp != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " RESPONSE Status: "+resp.Status+"\n") + p.writeHeader(b, resp.Header) + } + if err != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n") + } +} + +// formatHeaders appends an HTTP request's or response's header into a Buffer. 
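// Illustrative example (not part of the vendored file): with the default allow-lists, a request to
//
//	https://example.com/items?api-version=2024-01-01&sig=abc123
//
// is logged with the query string rewritten to api-version=2024-01-01&sig=REDACTED, and any header
// not in the allow-list above has its value replaced with REDACTED by writeHeader below.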
+func (p *logPolicy) writeHeader(b *bytes.Buffer, header http.Header) { + if len(header) == 0 { + b.WriteString(" (no headers)\n") + return + } + keys := make([]string, 0, len(header)) + // Alphabetize the headers + for k := range header { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + // don't use Get() as it will canonicalize k which might cause a mismatch + value := header[k][0] + // redact all header values not in the allow-list + if _, ok := p.allowedHeaders[strings.ToLower(k)]; !ok { + value = redactedValue + } + fmt.Fprintf(b, " %s: %+v\n", k, value) + } +} + +// returns true if the request/response body should be logged. +// this is determined by looking at the content-type header value. +func shouldLogBody(b *bytes.Buffer, contentType string) bool { + contentType = strings.ToLower(contentType) + if strings.HasPrefix(contentType, "text") || + strings.Contains(contentType, "json") || + strings.Contains(contentType, "xml") { + return true + } + fmt.Fprintf(b, " Skip logging body for %s\n", contentType) + return false +} + +// writes to a buffer, used for logging purposes +func writeReqBody(req *policy.Request, b *bytes.Buffer) error { + if req.Raw().Body == nil { + fmt.Fprint(b, " Request contained no body\n") + return nil + } + if ct := req.Raw().Header.Get(shared.HeaderContentType); !shouldLogBody(b, ct) { + return nil + } + body, err := io.ReadAll(req.Raw().Body) + if err != nil { + fmt.Fprintf(b, " Failed to read request body: %s\n", err.Error()) + return err + } + if err := req.RewindBody(); err != nil { + return err + } + logBody(b, body) + return nil +} + +// writes to a buffer, used for logging purposes +func writeRespBody(resp *http.Response, b *bytes.Buffer) error { + ct := resp.Header.Get(shared.HeaderContentType) + if ct == "" { + fmt.Fprint(b, " Response contained no body\n") + return nil + } else if !shouldLogBody(b, ct) { + return nil + } + body, err := Payload(resp) + if err != nil { + fmt.Fprintf(b, " Failed to read response body: %s\n", err.Error()) + return err + } + if len(body) > 0 { + logBody(b, body) + } else { + fmt.Fprint(b, " Response contained no body\n") + } + return nil +} + +func logBody(b *bytes.Buffer, body []byte) { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprintln(b, string(body)) + fmt.Fprintln(b, " --------------------------------------------------------------------------------") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go new file mode 100644 index 0000000000..360a7f2118 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go @@ -0,0 +1,34 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" +) + +type requestIDPolicy struct{} + +// NewRequestIDPolicy returns a policy that add the x-ms-client-request-id header +func NewRequestIDPolicy() policy.Policy { + return &requestIDPolicy{} +} + +func (r *requestIDPolicy) Do(req *policy.Request) (*http.Response, error) { + if req.Raw().Header.Get(shared.HeaderXMSClientRequestID) == "" { + id, err := uuid.New() + if err != nil { + return nil, err + } + req.Raw().Header.Set(shared.HeaderXMSClientRequestID, id.String()) + } + + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go new file mode 100644 index 0000000000..4c3a31fea7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go @@ -0,0 +1,276 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "errors" + "io" + "math" + "math/rand" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +const ( + defaultMaxRetries = 3 +) + +func setDefaults(o *policy.RetryOptions) { + if o.MaxRetries == 0 { + o.MaxRetries = defaultMaxRetries + } else if o.MaxRetries < 0 { + o.MaxRetries = 0 + } + + // SDK guidelines specify the default MaxRetryDelay is 60 seconds + if o.MaxRetryDelay == 0 { + o.MaxRetryDelay = 60 * time.Second + } else if o.MaxRetryDelay < 0 { + // not really an unlimited cap, but sufficiently large enough to be considered as such + o.MaxRetryDelay = math.MaxInt64 + } + if o.RetryDelay == 0 { + o.RetryDelay = 800 * time.Millisecond + } else if o.RetryDelay < 0 { + o.RetryDelay = 0 + } + if o.StatusCodes == nil { + // NOTE: if you change this list, you MUST update the docs in policy/policy.go + o.StatusCodes = []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + } +} + +func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0 + // avoid overflow when shifting left + factor := time.Duration(math.MaxInt64) + if try < 63 { + factor = time.Duration(int64(1<<try) - 1) + } + + delay := factor * o.RetryDelay + if delay < factor { + // overflow has happened so set to max value + delay = time.Duration(math.MaxInt64) + } + + // Introduce jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3) + jitterMultiplier := rand.Float64()/2 + 0.8 // NOTE: We want math/rand; not crypto/rand + + delayFloat := float64(delay) * jitterMultiplier + if delayFloat > float64(math.MaxInt64) { + // the jitter pushed us over MaxInt64, so just use MaxInt64 + delay = time.Duration(math.MaxInt64) + } else { + delay = time.Duration(delayFloat) + } + + if delay > o.MaxRetryDelay { // MaxRetryDelay is backfilled with non-negative value + delay = o.MaxRetryDelay + } + + return delay +} + +// NewRetryPolicy creates a policy object configured using the specified options. +// Pass nil to accept the default values; this is the same as passing a zero-value options.
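// Worked example (not part of the vendored file): with the defaults above (RetryDelay=800ms,
// MaxRetryDelay=60s), calcDelay for try=3 uses factor 2^3-1 = 7, i.e. a base delay of 5.6s, then a
// jitter multiplier of roughly 0.8-1.3, so the sleep lands in about 4.5s-7.3s before the
// MaxRetryDelay cap is applied. Options can also be overridden per call, e.g.
//
//	ctx = policy.WithRetryOptions(ctx, policy.RetryOptions{MaxRetries: 5, RetryDelay: 2 * time.Second})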
+func NewRetryPolicy(o *policy.RetryOptions) policy.Policy { + if o == nil { + o = &policy.RetryOptions{} + } + p := &retryPolicy{options: *o} + return p +} + +type retryPolicy struct { + options policy.RetryOptions +} + +func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) { + options := p.options + // check if the retry options have been overridden for this call + if override := req.Raw().Context().Value(shared.CtxWithRetryOptionsKey{}); override != nil { + options = override.(policy.RetryOptions) + } + setDefaults(&options) + // Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) + // When to retry: connection failure or temporary/timeout. + var rwbody *retryableRequestBody + if req.Body() != nil { + // wrap the body so we control when it's actually closed. + // do this outside the for loop so defers don't accumulate. + rwbody = &retryableRequestBody{body: req.Body()} + defer rwbody.realClose() + } + try := int32(1) + for { + resp = nil // reset + // unfortunately we don't have access to the custom allow-list of query params, so we'll redact everything but the default allowed QPs + log.Writef(log.EventRetryPolicy, "=====> Try=%d for %s %s", try, req.Raw().Method, getSanitizedURL(*req.Raw().URL, getAllowedQueryParams(nil))) + + // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because + // the stream may not be at offset 0 when we first get it and we want the same behavior for the + // 1st try as for additional tries. + err = req.RewindBody() + if err != nil { + return + } + // RewindBody() restores Raw().Body to its original state, so set our rewindable after + if rwbody != nil { + req.Raw().Body = rwbody + } + + if options.TryTimeout == 0 { + clone := req.Clone(req.Raw().Context()) + resp, err = clone.Next() + } else { + // Set the per-try time for this particular retry operation and then Do the operation. + tryCtx, tryCancel := context.WithTimeout(req.Raw().Context(), options.TryTimeout) + clone := req.Clone(tryCtx) + resp, err = clone.Next() // Make the request + // if the body was already downloaded or there was an error it's safe to cancel the context now + if err != nil { + tryCancel() + } else if exported.PayloadDownloaded(resp) { + tryCancel() + } else { + // must cancel the context after the body has been read and closed + resp.Body = &contextCancelReadCloser{cf: tryCancel, body: resp.Body} + } + } + if err == nil { + log.Writef(log.EventRetryPolicy, "response %d", resp.StatusCode) + } else { + log.Writef(log.EventRetryPolicy, "error %v", err) + } + + if ctxErr := req.Raw().Context().Err(); ctxErr != nil { + // don't retry if the parent context has been cancelled or its deadline exceeded + err = ctxErr + log.Writef(log.EventRetryPolicy, "abort due to %v", err) + return + } + + // check if the error is not retriable + var nre errorinfo.NonRetriable + if errors.As(err, &nre) { + // the error says it's not retriable so don't retry + log.Writef(log.EventRetryPolicy, "non-retriable error %T", nre) + return + } + + if options.ShouldRetry != nil { + // a non-nil ShouldRetry overrides our HTTP status code check + if !options.ShouldRetry(resp, err) { + // predicate says we shouldn't retry + log.Write(log.EventRetryPolicy, "exit due to ShouldRetry") + return + } + } else if err == nil && !HasStatusCode(resp, options.StatusCodes...) { + // if there is no error and the response code isn't in the list of retry codes then we're done. 
+ log.Write(log.EventRetryPolicy, "exit due to non-retriable status code") + return + } + + if try == options.MaxRetries+1 { + // max number of tries has been reached, don't sleep again + log.Writef(log.EventRetryPolicy, "MaxRetries %d exceeded", options.MaxRetries) + return + } + + // use the delay from retry-after if available + delay := shared.RetryAfter(resp) + if delay <= 0 { + delay = calcDelay(options, try) + } else if delay > options.MaxRetryDelay { + // the retry-after delay exceeds the the cap so don't retry + log.Writef(log.EventRetryPolicy, "Retry-After delay %s exceeds MaxRetryDelay of %s", delay, options.MaxRetryDelay) + return + } + + // drain before retrying so nothing is leaked + Drain(resp) + + log.Writef(log.EventRetryPolicy, "End Try #%d, Delay=%v", try, delay) + select { + case <-time.After(delay): + try++ + case <-req.Raw().Context().Done(): + err = req.Raw().Context().Err() + log.Writef(log.EventRetryPolicy, "abort due to %v", err) + return + } + } +} + +// WithRetryOptions adds the specified RetryOptions to the parent context. +// Use this to specify custom RetryOptions at the API-call level. +// Deprecated: use [policy.WithRetryOptions] instead. +func WithRetryOptions(parent context.Context, options policy.RetryOptions) context.Context { + return policy.WithRetryOptions(parent, options) +} + +// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser) + +// This struct is used when sending a body to the network +type retryableRequestBody struct { + body io.ReadSeeker // Seeking is required to support retries +} + +// Read reads a block of data from an inner stream and reports progress +func (b *retryableRequestBody) Read(p []byte) (n int, err error) { + return b.body.Read(p) +} + +func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) { + return b.body.Seek(offset, whence) +} + +func (b *retryableRequestBody) Close() error { + // We don't want the underlying transport to close the request body on transient failures so this is a nop. + // The retry policy closes the request body upon success. + return nil +} + +func (b *retryableRequestBody) realClose() error { + if c, ok := b.body.(io.Closer); ok { + return c.Close() + } + return nil +} + +// ********** The following type/methods implement the contextCancelReadCloser + +// contextCancelReadCloser combines an io.ReadCloser with a cancel func. +// it ensures the cancel func is invoked once the body has been read and closed. +type contextCancelReadCloser struct { + cf context.CancelFunc + body io.ReadCloser +} + +func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) { + return rc.body.Read(p) +} + +func (rc *contextCancelReadCloser) Close() error { + err := rc.body.Close() + rc.cf() + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go new file mode 100644 index 0000000000..3964beea86 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go @@ -0,0 +1,55 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// SASCredentialPolicy authorizes requests with a [azcore.SASCredential]. 
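// Illustrative sketch (not part of the vendored file): construction mirrors KeyCredentialPolicy, e.g.
//
//	cred := azcore.NewSASCredential("<shared-access-signature>") // placeholder SAS
//	p := runtime.NewSASCredentialPolicy(cred, "Authorization", nil) // header name varies by service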
+type SASCredentialPolicy struct { + cred *exported.SASCredential + header string + allowHTTP bool +} + +// SASCredentialPolicyOptions contains the optional values configuring [SASCredentialPolicy]. +type SASCredentialPolicyOptions struct { + // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP. + // By default, authenticated requests to an HTTP endpoint are rejected by the client. + // WARNING: setting this to true will allow sending the authentication key in clear text. Use with caution. + InsecureAllowCredentialWithHTTP bool +} + +// NewSASCredentialPolicy creates a new instance of [SASCredentialPolicy]. +// - cred is the [azcore.SASCredential] used to authenticate with the service +// - header is the name of the HTTP request header in which the shared access signature is placed +// - options contains optional configuration, pass nil to accept the default values +func NewSASCredentialPolicy(cred *exported.SASCredential, header string, options *SASCredentialPolicyOptions) *SASCredentialPolicy { + if options == nil { + options = &SASCredentialPolicyOptions{} + } + return &SASCredentialPolicy{ + cred: cred, + header: header, + allowHTTP: options.InsecureAllowCredentialWithHTTP, + } +} + +// Do implementes the Do method on the [policy.Polilcy] interface. +func (k *SASCredentialPolicy) Do(req *policy.Request) (*http.Response, error) { + // skip adding the authorization header if no SASCredential was provided. + // this prevents a panic that might be hard to diagnose and allows testing + // against http endpoints that don't require authentication. + if k.cred != nil { + if err := checkHTTPSForAuth(req, k.allowHTTP); err != nil { + return nil, err + } + req.Raw().Header.Add(k.header, exported.SASCredentialGet(k.cred)) + } + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go new file mode 100644 index 0000000000..80a9035461 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go @@ -0,0 +1,83 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "fmt" + "net/http" + "os" + "runtime" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +type telemetryPolicy struct { + telemetryValue string +} + +// NewTelemetryPolicy creates a telemetry policy object that adds telemetry information to outgoing HTTP requests. +// The format is [ ]azsdk-go-/ . +// Pass nil to accept the default values; this is the same as passing a zero-value options. +func NewTelemetryPolicy(mod, ver string, o *policy.TelemetryOptions) policy.Policy { + if o == nil { + o = &policy.TelemetryOptions{} + } + tp := telemetryPolicy{} + if o.Disabled { + return &tp + } + b := &bytes.Buffer{} + // normalize ApplicationID + if o.ApplicationID != "" { + o.ApplicationID = strings.ReplaceAll(o.ApplicationID, " ", "/") + if len(o.ApplicationID) > 24 { + o.ApplicationID = o.ApplicationID[:24] + } + b.WriteString(o.ApplicationID) + b.WriteRune(' ') + } + // mod might be the fully qualified name. 
in that case, we just want the package name + if i := strings.LastIndex(mod, "/"); i > -1 { + mod = mod[i+1:] + } + b.WriteString(formatTelemetry(mod, ver)) + b.WriteRune(' ') + b.WriteString(platformInfo) + tp.telemetryValue = b.String() + return &tp +} + +func formatTelemetry(comp, ver string) string { + return fmt.Sprintf("azsdk-go-%s/%s", comp, ver) +} + +func (p telemetryPolicy) Do(req *policy.Request) (*http.Response, error) { + if p.telemetryValue == "" { + return req.Next() + } + // preserve the existing User-Agent string + if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" { + p.telemetryValue = fmt.Sprintf("%s %s", p.telemetryValue, ua) + } + req.Raw().Header.Set(shared.HeaderUserAgent, p.telemetryValue) + return req.Next() +} + +// NOTE: the ONLY function that should write to this variable is this func +var platformInfo = func() string { + operatingSystem := runtime.GOOS // Default OS string + switch operatingSystem { + case "windows": + operatingSystem = os.Getenv("OS") // Get more specific OS information + case "linux": // accept default OS info + case "freebsd": // accept default OS info + } + return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem) +}() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go new file mode 100644 index 0000000000..03f76c9aa8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -0,0 +1,389 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/json" + "errors" + "flag" + "fmt" + "net/http" + "reflect" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// FinalStateVia is the enumerated type for the possible final-state-via values. +type FinalStateVia = pollers.FinalStateVia + +const ( + // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL. + FinalStateViaAzureAsyncOp = pollers.FinalStateViaAzureAsyncOp + + // FinalStateViaLocation indicates the final payload comes from the Location URL. + FinalStateViaLocation = pollers.FinalStateViaLocation + + // FinalStateViaOriginalURI indicates the final payload comes from the original URL. + FinalStateViaOriginalURI = pollers.FinalStateViaOriginalURI + + // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL. + FinalStateViaOpLocation = pollers.FinalStateViaOpLocation +) + +// NewPollerOptions contains the optional parameters for NewPoller. +type NewPollerOptions[T any] struct { + // FinalStateVia contains the final-state-via value for the LRO. + FinalStateVia FinalStateVia + + // Response contains a preconstructed response type. 
+ // The final payload will be unmarshaled into it and returned. + Response *T + + // Handler[T] contains a custom polling implementation. + Handler PollingHandler[T] + + // Tracer contains the Tracer from the client that's creating the Poller. + Tracer tracing.Tracer +} + +// NewPoller creates a Poller based on the provided initial response. +func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPollerOptions[T]) (*Poller[T], error) { + if options == nil { + options = &NewPollerOptions[T]{} + } + result := options.Response + if result == nil { + result = new(T) + } + if options.Handler != nil { + return &Poller[T]{ + op: options.Handler, + resp: resp, + result: result, + tracer: options.Tracer, + }, nil + } + + defer resp.Body.Close() + // this is a back-stop in case the swagger is incorrect (i.e. missing one or more status codes for success). + // ideally the codegen should return an error if the initial response failed and not even create a poller. + if !poller.StatusCodeValid(resp) { + return nil, errors.New("the operation failed or was cancelled") + } + + // determine the polling method + var opr PollingHandler[T] + var err error + if fake.Applicable(resp) { + opr, err = fake.New[T](pl, resp) + } else if async.Applicable(resp) { + // async poller must be checked first as it can also have a location header + opr, err = async.New[T](pl, resp, options.FinalStateVia) + } else if op.Applicable(resp) { + // op poller must be checked before loc as it can also have a location header + opr, err = op.New[T](pl, resp, options.FinalStateVia) + } else if loc.Applicable(resp) { + opr, err = loc.New[T](pl, resp) + } else if body.Applicable(resp) { + // must test body poller last as it's a subset of the other pollers. + // TODO: this is ambiguous for PATCH/PUT if it returns a 200 with no polling headers (sync completion) + opr, err = body.New[T](pl, resp) + } else if m := resp.Request.Method; resp.StatusCode == http.StatusAccepted && (m == http.MethodDelete || m == http.MethodPost) { + // if we get here it means we have a 202 with no polling headers. + // for DELETE and POST this is a hard error per ARM RPC spec. + return nil, errors.New("response is missing polling URL") + } else { + opr, err = pollers.NewNopPoller[T](resp) + } + + if err != nil { + return nil, err + } + return &Poller[T]{ + op: opr, + resp: resp, + result: result, + tracer: options.Tracer, + }, nil +} + +// NewPollerFromResumeTokenOptions contains the optional parameters for NewPollerFromResumeToken. +type NewPollerFromResumeTokenOptions[T any] struct { + // Response contains a preconstructed response type. + // The final payload will be unmarshaled into it and returned. + Response *T + + // Handler[T] contains a custom polling implementation. + Handler PollingHandler[T] + + // Tracer contains the Tracer from the client that's creating the Poller. + Tracer tracing.Tracer +} + +// NewPollerFromResumeToken creates a Poller from a resume token string. 
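// Illustrative sketch (not part of the vendored file): the token comes from Poller.ResumeToken and
// lets a new process pick up an in-flight LRO, e.g.
//
//	tk, err := poller.ResumeToken() // captured before the original process exits
//	// ... later, possibly in another process ...
//	poller, err := runtime.NewPollerFromResumeToken[WidgetResponse](tk, pl, nil) // WidgetResponse is a placeholder type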
+func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options *NewPollerFromResumeTokenOptions[T]) (*Poller[T], error) { + if options == nil { + options = &NewPollerFromResumeTokenOptions[T]{} + } + result := options.Response + if result == nil { + result = new(T) + } + + if err := pollers.IsTokenValid[T](token); err != nil { + return nil, err + } + raw, err := pollers.ExtractToken(token) + if err != nil { + return nil, err + } + var asJSON map[string]any + if err := json.Unmarshal(raw, &asJSON); err != nil { + return nil, err + } + + opr := options.Handler + // now rehydrate the poller based on the encoded poller type + if fake.CanResume(asJSON) { + opr, _ = fake.New[T](pl, nil) + } else if opr != nil { + log.Writef(log.EventLRO, "Resuming custom poller %T.", opr) + } else if async.CanResume(asJSON) { + opr, _ = async.New[T](pl, nil, "") + } else if body.CanResume(asJSON) { + opr, _ = body.New[T](pl, nil) + } else if loc.CanResume(asJSON) { + opr, _ = loc.New[T](pl, nil) + } else if op.CanResume(asJSON) { + opr, _ = op.New[T](pl, nil, "") + } else { + return nil, fmt.Errorf("unhandled poller token %s", string(raw)) + } + if err := json.Unmarshal(raw, &opr); err != nil { + return nil, err + } + return &Poller[T]{ + op: opr, + result: result, + tracer: options.Tracer, + }, nil +} + +// PollingHandler[T] abstracts the differences among poller implementations. +type PollingHandler[T any] interface { + // Done returns true if the LRO has reached a terminal state. + Done() bool + + // Poll fetches the latest state of the LRO. + Poll(context.Context) (*http.Response, error) + + // Result is called once the LRO has reached a terminal state. It populates the out parameter + // with the result of the operation. + Result(ctx context.Context, out *T) error +} + +// Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state. +type Poller[T any] struct { + op PollingHandler[T] + resp *http.Response + err error + result *T + tracer tracing.Tracer + done bool +} + +// PollUntilDoneOptions contains the optional values for the Poller[T].PollUntilDone() method. +type PollUntilDoneOptions struct { + // Frequency is the time to wait between polling intervals in absence of a Retry-After header. Allowed minimum is one second. + // Pass zero to accept the default value (30s). + Frequency time.Duration +} + +// PollUntilDone will poll the service endpoint until a terminal state is reached, an error is received, or the context expires. +// It internally uses Poll(), Done(), and Result() in its polling loop, sleeping for the specified duration between intervals. +// options: pass nil to accept the default values. +// NOTE: the default polling frequency is 30 seconds which works well for most operations. However, some operations might +// benefit from a shorter or longer duration. 
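// Illustrative sketch (not part of the vendored file): the common calling pattern is
//
//	poller, err := runtime.NewPoller[WidgetResponse](resp, pl, nil) // WidgetResponse is a placeholder type
//	if err != nil {
//		// handle error
//	}
//	result, err := poller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: 5 * time.Second})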
+func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (res T, err error) { + if options == nil { + options = &PollUntilDoneOptions{} + } + cp := *options + if cp.Frequency == 0 { + cp.Frequency = 30 * time.Second + } + + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.PollUntilDone", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + + // skip the floor check when executing tests so they don't take so long + if isTest := flag.Lookup("test.v"); isTest == nil && cp.Frequency < time.Second { + err = errors.New("polling frequency minimum is one second") + return + } + + start := time.Now() + logPollUntilDoneExit := func(v any) { + log.Writef(log.EventLRO, "END PollUntilDone() for %T: %v, total time: %s", p.op, v, time.Since(start)) + } + log.Writef(log.EventLRO, "BEGIN PollUntilDone() for %T", p.op) + if p.resp != nil { + // initial check for a retry-after header existing on the initial response + if retryAfter := shared.RetryAfter(p.resp); retryAfter > 0 { + log.Writef(log.EventLRO, "initial Retry-After delay for %s", retryAfter.String()) + if err = shared.Delay(ctx, retryAfter); err != nil { + logPollUntilDoneExit(err) + return + } + } + } + // begin polling the endpoint until a terminal state is reached + for { + var resp *http.Response + resp, err = p.Poll(ctx) + if err != nil { + logPollUntilDoneExit(err) + return + } + if p.Done() { + logPollUntilDoneExit("succeeded") + res, err = p.Result(ctx) + return + } + d := cp.Frequency + if retryAfter := shared.RetryAfter(resp); retryAfter > 0 { + log.Writef(log.EventLRO, "Retry-After delay for %s", retryAfter.String()) + d = retryAfter + } else { + log.Writef(log.EventLRO, "delay for %s", d.String()) + } + if err = shared.Delay(ctx, d); err != nil { + logPollUntilDoneExit(err) + return + } + } +} + +// Poll fetches the latest state of the LRO. It returns an HTTP response or error. +// If Poll succeeds, the poller's state is updated and the HTTP response is returned. +// If Poll fails, the poller's state is unmodified and the error is returned. +// Calling Poll on an LRO that has reached a terminal state will return the last HTTP response. +func (p *Poller[T]) Poll(ctx context.Context) (resp *http.Response, err error) { + if p.Done() { + // the LRO has reached a terminal state, don't poll again + resp = p.resp + return + } + + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.Poll", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + + resp, err = p.op.Poll(ctx) + if err != nil { + return + } + p.resp = resp + return +} + +// Done returns true if the LRO has reached a terminal state. +// Once a terminal state is reached, call Result(). +func (p *Poller[T]) Done() bool { + return p.op.Done() +} + +// Result returns the result of the LRO and is meant to be used in conjunction with Poll and Done. +// If the LRO completed successfully, a populated instance of T is returned. +// If the LRO failed or was canceled, an *azcore.ResponseError error is returned. +// Calling this on an LRO in a non-terminal state will return an error. 
+func (p *Poller[T]) Result(ctx context.Context) (res T, err error) { + if !p.Done() { + err = errors.New("poller is in a non-terminal state") + return + } + if p.done { + // the result has already been retrieved, return the cached value + if p.err != nil { + err = p.err + return + } + res = *p.result + return + } + + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.Result", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + + err = p.op.Result(ctx, p.result) + var respErr *exported.ResponseError + if errors.As(err, &respErr) { + if pollers.IsNonTerminalHTTPStatusCode(respErr.RawResponse) { + // the request failed in a non-terminal way. + // don't cache the error or mark the Poller as done + return + } + // the LRO failed. record the error + p.err = err + } else if err != nil { + // the call to Result failed, don't cache anything in this case + return + } + p.done = true + if p.err != nil { + err = p.err + return + } + res = *p.result + return +} + +// ResumeToken returns a value representing the poller that can be used to resume +// the LRO at a later time. ResumeTokens are unique per service operation. +// The token's format should be considered opaque and is subject to change. +// Calling this on an LRO in a terminal state will return an error. +func (p *Poller[T]) ResumeToken() (string, error) { + if p.Done() { + return "", errors.New("poller is in a terminal state") + } + tk, err := pollers.NewResumeToken[T](p.op) + if err != nil { + return "", err + } + return tk, err +} + +// extracts the type name from the string returned from reflect.Value.Name() +func shortenTypeName(s string) string { + // the value is formatted as follows + // Poller[module/Package.Type].Method + // we want to shorten the generic type parameter string to Type + // anything we don't recognize will be left as-is + begin := strings.Index(s, "[") + end := strings.Index(s, "]") + if begin == -1 || end == -1 { + return s + } + + typeName := s[begin+1 : end] + if i := strings.LastIndex(typeName, "."); i > -1 { + typeName = typeName[i+1:] + } + return s[:begin+1] + typeName + s[end:] +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go new file mode 100644 index 0000000000..7d34b7803a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -0,0 +1,281 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "io" + "mime/multipart" + "net/http" + "net/textproto" + "net/url" + "path" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" +) + +// Base64Encoding is usesd to specify which base-64 encoder/decoder to use when +// encoding/decoding a slice of bytes to/from a string. +type Base64Encoding = exported.Base64Encoding + +const ( + // Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads. + Base64StdFormat Base64Encoding = exported.Base64StdFormat + + // Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads. 
+ Base64URLFormat Base64Encoding = exported.Base64URLFormat +) + +// NewRequest creates a new policy.Request with the specified input. +// The endpoint MUST be properly encoded before calling this function. +func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*policy.Request, error) { + return exported.NewRequest(ctx, httpMethod, endpoint) +} + +// NewRequestFromRequest creates a new policy.Request with an existing *http.Request +func NewRequestFromRequest(req *http.Request) (*policy.Request, error) { + return exported.NewRequestFromRequest(req) +} + +// EncodeQueryParams will parse and encode any query parameters in the specified URL. +// Any semicolons will automatically be escaped. +func EncodeQueryParams(u string) (string, error) { + before, after, found := strings.Cut(u, "?") + if !found { + return u, nil + } + // starting in Go 1.17, url.ParseQuery will reject semicolons in query params. + // so, we must escape them first. note that this assumes that semicolons aren't + // being used as query param separators which is per the current RFC. + // for more info: + // https://github.com/golang/go/issues/25192 + // https://github.com/golang/go/issues/50034 + qp, err := url.ParseQuery(strings.ReplaceAll(after, ";", "%3B")) + if err != nil { + return "", err + } + return before + "?" + qp.Encode(), nil +} + +// JoinPaths concatenates multiple URL path segments into one path, +// inserting path separation characters as required. JoinPaths will preserve +// query parameters in the root path +func JoinPaths(root string, paths ...string) string { + if len(paths) == 0 { + return root + } + + qps := "" + if strings.Contains(root, "?") { + splitPath := strings.Split(root, "?") + root, qps = splitPath[0], splitPath[1] + } + + p := path.Join(paths...) + // path.Join will remove any trailing slashes. + // if one was provided, preserve it. + if strings.HasSuffix(paths[len(paths)-1], "/") && !strings.HasSuffix(p, "/") { + p += "/" + } + + if qps != "" { + p = p + "?" + qps + } + + if strings.HasSuffix(root, "/") && strings.HasPrefix(p, "/") { + root = root[:len(root)-1] + } else if !strings.HasSuffix(root, "/") && !strings.HasPrefix(p, "/") { + p = "/" + p + } + return root + p +} + +// EncodeByteArray will base-64 encode the byte slice v. +func EncodeByteArray(v []byte, format Base64Encoding) string { + return exported.EncodeByteArray(v, format) +} + +// MarshalAsByteArray will base-64 encode the byte slice v, then calls SetBody. +// The encoded value is treated as a JSON string. +func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) error { + // send as a JSON string + encode := fmt.Sprintf("\"%s\"", EncodeByteArray(v, format)) + // tsp generated code can set Content-Type so we must prefer that + return exported.SetBody(req, exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON, false) +} + +// MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody. +func MarshalAsJSON(req *policy.Request, v any) error { + b, err := json.Marshal(v) + if err != nil { + return fmt.Errorf("error marshalling type %T: %s", v, err) + } + // tsp generated code can set Content-Type so we must prefer that + return exported.SetBody(req, exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON, false) +} + +// MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody. 
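// Illustrative sketch (not part of the vendored file): typical request construction pairs
// NewRequest with one of the marshalling helpers (MarshalAsJSON or MarshalAsXML below), e.g.
//
//	req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths("https://example.com", "widgets", "1"))
//	if err != nil {
//		// handle error
//	}
//	err = runtime.MarshalAsJSON(req, map[string]string{"name": "example"})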
+func MarshalAsXML(req *policy.Request, v any) error { + b, err := xml.Marshal(v) + if err != nil { + return fmt.Errorf("error marshalling type %T: %s", v, err) + } + // inclue the XML header as some services require it + b = []byte(xml.Header + string(b)) + return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppXML) +} + +// SetMultipartFormData writes the specified keys/values as multi-part form fields with the specified value. +// File content must be specified as an [io.ReadSeekCloser] or [streaming.MultipartContent]. +// Byte slices will be treated as JSON. All other values are treated as string values. +func SetMultipartFormData(req *policy.Request, formData map[string]any) error { + body := bytes.Buffer{} + writer := multipart.NewWriter(&body) + + writeContent := func(fieldname, filename string, src io.Reader) error { + fd, err := writer.CreateFormFile(fieldname, filename) + if err != nil { + return err + } + // copy the data to the form file + if _, err = io.Copy(fd, src); err != nil { + return err + } + return nil + } + + quoteEscaper := strings.NewReplacer("\\", "\\\\", `"`, "\\\"") + + writeMultipartContent := func(fieldname string, mpc streaming.MultipartContent) error { + if mpc.Body == nil { + return errors.New("streaming.MultipartContent.Body cannot be nil") + } + + // use fieldname for the file name when unspecified + filename := fieldname + + if mpc.ContentType == "" && mpc.Filename == "" { + return writeContent(fieldname, filename, mpc.Body) + } + if mpc.Filename != "" { + filename = mpc.Filename + } + // this is pretty much copied from multipart.Writer.CreateFormFile + // but lets us set the caller provided Content-Type and filename + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", + fmt.Sprintf(`form-data; name="%s"; filename="%s"`, + quoteEscaper.Replace(fieldname), quoteEscaper.Replace(filename))) + contentType := "application/octet-stream" + if mpc.ContentType != "" { + contentType = mpc.ContentType + } + h.Set("Content-Type", contentType) + fd, err := writer.CreatePart(h) + if err != nil { + return err + } + // copy the data to the form file + if _, err = io.Copy(fd, mpc.Body); err != nil { + return err + } + return nil + } + + // the same as multipart.Writer.WriteField but lets us specify the Content-Type + writeField := func(fieldname, contentType string, value string) error { + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", + fmt.Sprintf(`form-data; name="%s"`, quoteEscaper.Replace(fieldname))) + h.Set("Content-Type", contentType) + fd, err := writer.CreatePart(h) + if err != nil { + return err + } + if _, err = fd.Write([]byte(value)); err != nil { + return err + } + return nil + } + + for k, v := range formData { + if rsc, ok := v.(io.ReadSeekCloser); ok { + if err := writeContent(k, k, rsc); err != nil { + return err + } + continue + } else if rscs, ok := v.([]io.ReadSeekCloser); ok { + for _, rsc := range rscs { + if err := writeContent(k, k, rsc); err != nil { + return err + } + } + continue + } else if mpc, ok := v.(streaming.MultipartContent); ok { + if err := writeMultipartContent(k, mpc); err != nil { + return err + } + continue + } else if mpcs, ok := v.([]streaming.MultipartContent); ok { + for _, mpc := range mpcs { + if err := writeMultipartContent(k, mpc); err != nil { + return err + } + } + continue + } + + var content string + contentType := shared.ContentTypeTextPlain + switch tt := v.(type) { + case []byte: + // JSON, don't quote it + content = string(tt) + contentType = 
shared.ContentTypeAppJSON + case string: + content = tt + default: + // ensure the value is in string format + content = fmt.Sprintf("%v", v) + } + + if err := writeField(k, contentType, content); err != nil { + return err + } + } + if err := writer.Close(); err != nil { + return err + } + return req.SetBody(exported.NopCloser(bytes.NewReader(body.Bytes())), writer.FormDataContentType()) +} + +// SkipBodyDownload will disable automatic downloading of the response body. +func SkipBodyDownload(req *policy.Request) { + req.SetOperationValue(bodyDownloadPolicyOpValues{Skip: true}) +} + +// CtxAPINameKey is used as a context key for adding/retrieving the API name. +type CtxAPINameKey = shared.CtxAPINameKey + +// NewUUID returns a new UUID using the RFC4122 algorithm. +func NewUUID() (string, error) { + u, err := uuid.New() + if err != nil { + return "", err + } + return u.String(), nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go new file mode 100644 index 0000000000..048566e02c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go @@ -0,0 +1,109 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/http" + + azexported "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +// Payload reads and returns the response body or an error. +// On a successful read, the response body is cached. +// Subsequent reads will access the cached value. +func Payload(resp *http.Response) ([]byte, error) { + return exported.Payload(resp, nil) +} + +// HasStatusCode returns true if the Response's status code is one of the specified values. +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + return exported.HasStatusCode(resp, statusCodes...) +} + +// UnmarshalAsByteArray will base-64 decode the received payload and place the result into the value pointed to by v. +func UnmarshalAsByteArray(resp *http.Response, v *[]byte, format Base64Encoding) error { + p, err := Payload(resp) + if err != nil { + return err + } + return DecodeByteArray(string(p), v, format) +} + +// UnmarshalAsJSON calls json.Unmarshal() to unmarshal the received payload into the value pointed to by v. +func UnmarshalAsJSON(resp *http.Response, v any) error { + payload, err := Payload(resp) + if err != nil { + return err + } + // TODO: verify early exit is correct + if len(payload) == 0 { + return nil + } + err = removeBOM(resp) + if err != nil { + return err + } + err = json.Unmarshal(payload, v) + if err != nil { + err = fmt.Errorf("unmarshalling type %T: %s", v, err) + } + return err +} + +// UnmarshalAsXML calls xml.Unmarshal() to unmarshal the received payload into the value pointed to by v. +func UnmarshalAsXML(resp *http.Response, v any) error { + payload, err := Payload(resp) + if err != nil { + return err + } + // TODO: verify early exit is correct + if len(payload) == 0 { + return nil + } + err = removeBOM(resp) + if err != nil { + return err + } + err = xml.Unmarshal(payload, v) + if err != nil { + err = fmt.Errorf("unmarshalling type %T: %s", v, err) + } + return err +} + +// Drain reads the response body to completion then closes it. The bytes read are discarded. 
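A sketch of typical response handling with the helpers above (Drain is defined just below); the expected status codes and the map result shape are illustrative only:

    package example

    import (
        "fmt"
        "net/http"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
    )

    func decodeResponse(resp *http.Response) (map[string]any, error) {
        if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) {
            // discard and close the body so the connection can be reused
            runtime.Drain(resp)
            return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode)
        }
        var v map[string]any
        // the payload is cached, so the body can still be inspected again later
        if err := runtime.UnmarshalAsJSON(resp, &v); err != nil {
            return nil, err
        }
        return v, nil
    }
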
+func Drain(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + resp.Body.Close() + } +} + +// removeBOM removes any byte-order mark prefix from the payload if present. +func removeBOM(resp *http.Response) error { + _, err := exported.Payload(resp, &exported.PayloadOptions{ + BytesModifier: func(b []byte) []byte { + // UTF8 + return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) + }, + }) + if err != nil { + return err + } + return nil +} + +// DecodeByteArray will base-64 decode the provided string into v. +func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error { + return azexported.DecodeByteArray(s, v, format) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go new file mode 100644 index 0000000000..1c75d771f2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go @@ -0,0 +1,15 @@ +//go:build !wasm + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net" +) + +func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) { + return dialer.DialContext +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go new file mode 100644 index 0000000000..3dc9eeecdd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go @@ -0,0 +1,15 @@ +//go:build (js && wasm) || wasip1 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net" +) + +func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) { + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go new file mode 100644 index 0000000000..2124c1d48b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go @@ -0,0 +1,48 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "crypto/tls" + "net" + "net/http" + "time" + + "golang.org/x/net/http2" +) + +var defaultHTTPClient *http.Client + +func init() { + defaultTransport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: defaultTransportDialContext(&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }), + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 10, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + TLSClientConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + Renegotiation: tls.RenegotiateFreelyAsClient, + }, + } + // TODO: evaluate removing this once https://github.com/golang/go/issues/59690 has been fixed + if http2Transport, err := http2.ConfigureTransports(defaultTransport); err == nil { + // if the connection has been idle for 10 seconds, send a ping frame for a health check + http2Transport.ReadIdleTimeout = 10 * time.Second + // if there's no response to the ping within the timeout, the connection will be closed + http2Transport.PingTimeout = 5 * time.Second + } + defaultHTTPClient = &http.Client{ + Transport: defaultTransport, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go new file mode 100644 index 0000000000..cadaef3d58 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go @@ -0,0 +1,9 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package streaming contains helpers for streaming IO operations and progress reporting. +package streaming diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go new file mode 100644 index 0000000000..2468540bd7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go @@ -0,0 +1,89 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package streaming + +import ( + "io" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" +) + +type progress struct { + rc io.ReadCloser + rsc io.ReadSeekCloser + pr func(bytesTransferred int64) + offset int64 +} + +// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. +// In addition to adding a Close method to an io.ReadSeeker, this can also be used to wrap an +// io.ReadSeekCloser with a no-op Close method to allow explicit control of when the io.ReedSeekCloser +// has its underlying stream closed. +func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { + return exported.NopCloser(rs) +} + +// NewRequestProgress adds progress reporting to an HTTP request's body stream. +func NewRequestProgress(body io.ReadSeekCloser, pr func(bytesTransferred int64)) io.ReadSeekCloser { + return &progress{ + rc: body, + rsc: body, + pr: pr, + offset: 0, + } +} + +// NewResponseProgress adds progress reporting to an HTTP response's body stream. 
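A small sketch of wrapping an upload stream with the request-side progress reporter defined above; the file path and the print-based callback are placeholders:

    package example

    import (
        "fmt"
        "io"
        "os"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
    )

    func progressBody(path string) (io.ReadSeekCloser, error) {
        f, err := os.Open(path)
        if err != nil {
            return nil, err
        }
        // every Read on the returned stream reports the cumulative bytes transferred
        return streaming.NewRequestProgress(f, func(bytesTransferred int64) {
            fmt.Printf("uploaded %d bytes\n", bytesTransferred)
        }), nil
    }
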
+func NewResponseProgress(body io.ReadCloser, pr func(bytesTransferred int64)) io.ReadCloser { + return &progress{ + rc: body, + rsc: nil, + pr: pr, + offset: 0, + } +} + +// Read reads a block of data from an inner stream and reports progress +func (p *progress) Read(b []byte) (n int, err error) { + n, err = p.rc.Read(b) + if err != nil && err != io.EOF { + return + } + p.offset += int64(n) + // Invokes the user's callback method to report progress + p.pr(p.offset) + return +} + +// Seek only expects a zero or from beginning. +func (p *progress) Seek(offset int64, whence int) (int64, error) { + // This should only ever be called with offset = 0 and whence = io.SeekStart + n, err := p.rsc.Seek(offset, whence) + if err == nil { + p.offset = int64(n) + } + return n, err +} + +// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it. +func (p *progress) Close() error { + return p.rc.Close() +} + +// MultipartContent contains streaming content used in multipart/form payloads. +type MultipartContent struct { + // Body contains the required content body. + Body io.ReadSeekCloser + + // ContentType optionally specifies the HTTP Content-Type for this Body. + // The default value is application/octet-stream. + ContentType string + + // Filename optionally specifies the filename for this Body. + // The default value is the field name for the multipart/form section. + Filename string +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go new file mode 100644 index 0000000000..faa98c9dc5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go @@ -0,0 +1,9 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package to contains various type-conversion helper functions. +package to diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go new file mode 100644 index 0000000000..e0e4817b90 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go @@ -0,0 +1,21 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package to + +// Ptr returns a pointer to the provided value. +func Ptr[T any](v T) *T { + return &v +} + +// SliceOfPtrs returns a slice of *T from the specified values. +func SliceOfPtrs[T any](vv ...T) []*T { + slc := make([]*T, len(vv)) + for i := range vv { + slc[i] = Ptr(vv[i]) + } + return slc +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go new file mode 100644 index 0000000000..80282d4ab0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go @@ -0,0 +1,41 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tracing + +// SpanKind represents the role of a Span inside a Trace. Often, this defines how a Span will be processed and visualized by various backends. +type SpanKind int + +const ( + // SpanKindInternal indicates the span represents an internal operation within an application. 
+ SpanKindInternal SpanKind = 1 + + // SpanKindServer indicates the span covers server-side handling of a request. + SpanKindServer SpanKind = 2 + + // SpanKindClient indicates the span describes a request to a remote service. + SpanKindClient SpanKind = 3 + + // SpanKindProducer indicates the span was created by a messaging producer. + SpanKindProducer SpanKind = 4 + + // SpanKindConsumer indicates the span was created by a messaging consumer. + SpanKindConsumer SpanKind = 5 +) + +// SpanStatus represents the status of a span. +type SpanStatus int + +const ( + // SpanStatusUnset is the default status code. + SpanStatusUnset SpanStatus = 0 + + // SpanStatusError indicates the operation contains an error. + SpanStatusError SpanStatus = 1 + + // SpanStatusOK indicates the operation completed successfully. + SpanStatusOK SpanStatus = 2 +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go new file mode 100644 index 0000000000..1ade7c560f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go @@ -0,0 +1,191 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Package tracing contains the definitions needed to support distributed tracing. +package tracing + +import ( + "context" +) + +// ProviderOptions contains the optional values when creating a Provider. +type ProviderOptions struct { + // for future expansion +} + +// NewProvider creates a new Provider with the specified values. +// - newTracerFn is the underlying implementation for creating Tracer instances +// - options contains optional values; pass nil to accept the default value +func NewProvider(newTracerFn func(name, version string) Tracer, options *ProviderOptions) Provider { + return Provider{ + newTracerFn: newTracerFn, + } +} + +// Provider is the factory that creates Tracer instances. +// It defaults to a no-op provider. +type Provider struct { + newTracerFn func(name, version string) Tracer +} + +// NewTracer creates a new Tracer for the specified module name and version. +// - module - the fully qualified name of the module +// - version - the version of the module +func (p Provider) NewTracer(module, version string) (tracer Tracer) { + if p.newTracerFn != nil { + tracer = p.newTracerFn(module, version) + } + return +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// TracerOptions contains the optional values when creating a Tracer. +type TracerOptions struct { + // SpanFromContext contains the implementation for the Tracer.SpanFromContext method. + SpanFromContext func(context.Context) Span +} + +// NewTracer creates a Tracer with the specified values. +// - newSpanFn is the underlying implementation for creating Span instances +// - options contains optional values; pass nil to accept the default value +func NewTracer(newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span), options *TracerOptions) Tracer { + if options == nil { + options = &TracerOptions{} + } + return Tracer{ + newSpanFn: newSpanFn, + spanFromContextFn: options.SpanFromContext, + } +} + +// Tracer is the factory that creates Span instances. 
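A sketch of how a concrete tracing implementation could be plugged in through the NewProvider and NewTracer constructors defined above; the span-creation body is a stub rather than a real exporter:

    package example

    import (
        "context"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
    )

    func newStubProvider() tracing.Provider {
        return tracing.NewProvider(func(name, version string) tracing.Tracer {
            return tracing.NewTracer(func(ctx context.Context, spanName string, opts *tracing.SpanOptions) (context.Context, tracing.Span) {
                // a real implementation would start a span here (e.g. via OpenTelemetry)
                // and return one built with tracing.NewSpan; the zero-value Span is a no-op
                return ctx, tracing.Span{}
            }, nil)
        }, nil)
    }
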
+type Tracer struct { + attrs []Attribute + newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) + spanFromContextFn func(ctx context.Context) Span +} + +// Start creates a new span and a context.Context that contains it. +// - ctx is the parent context for this span. If it contains a Span, the newly created span will be a child of that span, else it will be a root span +// - spanName identifies the span within a trace, it's typically the fully qualified API name +// - options contains optional values for the span, pass nil to accept any defaults +func (t Tracer) Start(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) { + if t.newSpanFn != nil { + opts := SpanOptions{} + if options != nil { + opts = *options + } + opts.Attributes = append(opts.Attributes, t.attrs...) + return t.newSpanFn(ctx, spanName, &opts) + } + return ctx, Span{} +} + +// SetAttributes sets attrs to be applied to each Span. If a key from attrs +// already exists for an attribute of the Span it will be overwritten with +// the value contained in attrs. +func (t *Tracer) SetAttributes(attrs ...Attribute) { + t.attrs = append(t.attrs, attrs...) +} + +// Enabled returns true if this Tracer is capable of creating Spans. +func (t Tracer) Enabled() bool { + return t.newSpanFn != nil +} + +// SpanFromContext returns the Span associated with the current context. +// If the provided context has no Span, false is returned. +func (t Tracer) SpanFromContext(ctx context.Context) Span { + if t.spanFromContextFn != nil { + return t.spanFromContextFn(ctx) + } + return Span{} +} + +// SpanOptions contains optional settings for creating a span. +type SpanOptions struct { + // Kind indicates the kind of Span. + Kind SpanKind + + // Attributes contains key-value pairs of attributes for the span. + Attributes []Attribute +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// SpanImpl abstracts the underlying implementation for Span, +// allowing it to work with various tracing implementations. +// Any zero-values will have their default, no-op behavior. +type SpanImpl struct { + // End contains the implementation for the Span.End method. + End func() + + // SetAttributes contains the implementation for the Span.SetAttributes method. + SetAttributes func(...Attribute) + + // AddEvent contains the implementation for the Span.AddEvent method. + AddEvent func(string, ...Attribute) + + // SetStatus contains the implementation for the Span.SetStatus method. + SetStatus func(SpanStatus, string) +} + +// NewSpan creates a Span with the specified implementation. +func NewSpan(impl SpanImpl) Span { + return Span{ + impl: impl, + } +} + +// Span is a single unit of a trace. A trace can contain multiple spans. +// A zero-value Span provides a no-op implementation. +type Span struct { + impl SpanImpl +} + +// End terminates the span and MUST be called before the span leaves scope. +// Any further updates to the span will be ignored after End is called. +func (s Span) End() { + if s.impl.End != nil { + s.impl.End() + } +} + +// SetAttributes sets the specified attributes on the Span. +// Any existing attributes with the same keys will have their values overwritten. +func (s Span) SetAttributes(attrs ...Attribute) { + if s.impl.SetAttributes != nil { + s.impl.SetAttributes(attrs...) + } +} + +// AddEvent adds a named event with an optional set of attributes to the span. 
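A sketch of the intended call pattern for Tracer.Start and the resulting Span; the operation name and the attribute value (the Attribute type is defined a little further down) are placeholders:

    package example

    import (
        "context"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
    )

    func tracedOperation(ctx context.Context, tr tracing.Tracer) {
        ctx, span := tr.Start(ctx, "ExampleClient.TracedOperation", &tracing.SpanOptions{
            Kind:       tracing.SpanKindClient,
            Attributes: []tracing.Attribute{{Key: "example.attempt", Value: 1}},
        })
        // End must be called before the span goes out of scope
        defer span.End()

        _ = ctx // the derived context would be passed to downstream calls
        span.SetStatus(tracing.SpanStatusOK, "")
    }
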
+func (s Span) AddEvent(name string, attrs ...Attribute) { + if s.impl.AddEvent != nil { + s.impl.AddEvent(name, attrs...) + } +} + +// SetStatus sets the status on the span along with a description. +func (s Span) SetStatus(code SpanStatus, desc string) { + if s.impl.SetStatus != nil { + s.impl.SetStatus(code, desc) + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// Attribute is a key-value pair. +type Attribute struct { + // Key is the name of the attribute. + Key string + + // Value is the attribute's value. + // Types that are natively supported include int64, float64, int, bool, string. + // Any other type will be formatted per rules of fmt.Sprintf("%v"). + Value any +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt new file mode 100644 index 0000000000..48ea6616b5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go new file mode 100644 index 0000000000..245af7d2be --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go @@ -0,0 +1,51 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package diag + +import ( + "fmt" + "runtime" + "strings" +) + +// Caller returns the file and line number of a frame on the caller's stack. +// If the funtion fails an empty string is returned. +// skipFrames - the number of frames to skip when determining the caller. +// Passing a value of 0 will return the immediate caller of this function. +func Caller(skipFrames int) string { + if pc, file, line, ok := runtime.Caller(skipFrames + 1); ok { + // the skipFrames + 1 is to skip ourselves + frame := runtime.FuncForPC(pc) + return fmt.Sprintf("%s()\n\t%s:%d", frame.Name(), file, line) + } + return "" +} + +// StackTrace returns a formatted stack trace string. +// If the funtion fails an empty string is returned. +// skipFrames - the number of stack frames to skip before composing the trace string. +// totalFrames - the maximum number of stack frames to include in the trace string. 
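A sketch of how SDK code might use Caller and StackTrace (the package sits under sdk/internal, so it is only importable from within the Azure SDK module); the frame counts are illustrative:

    package example

    import (
        "fmt"

        "github.com/Azure/azure-sdk-for-go/sdk/internal/diag"
    )

    func reportFailure(err error) {
        // Caller(0) identifies the immediate caller of diag.Caller, i.e. this function
        fmt.Printf("error: %v\n\tat %s\n", err, diag.Caller(0))
        // capture at most five frames, skipping the lowest-level plumbing
        fmt.Println(diag.StackTrace(2, 5))
    }
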
+func StackTrace(skipFrames, totalFrames int) string { + pcCallers := make([]uintptr, totalFrames) + if frames := runtime.Callers(skipFrames, pcCallers); frames == 0 { + return "" + } + frames := runtime.CallersFrames(pcCallers) + sb := strings.Builder{} + for { + frame, more := frames.Next() + sb.WriteString(frame.Function) + sb.WriteString("()\n\t") + sb.WriteString(frame.File) + sb.WriteRune(':') + sb.WriteString(fmt.Sprintf("%d\n", frame.Line)) + if !more { + break + } + } + return sb.String() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go new file mode 100644 index 0000000000..66bf13e5f0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package diag diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go new file mode 100644 index 0000000000..8c6eacb618 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package errorinfo diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go new file mode 100644 index 0000000000..8ee66b5267 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go @@ -0,0 +1,46 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package errorinfo + +// NonRetriable represents a non-transient error. This works in +// conjunction with the retry policy, indicating that the error condition +// is idempotent, so no retries will be attempted. +// Use errors.As() to access this interface in the error chain. +type NonRetriable interface { + error + NonRetriable() +} + +// NonRetriableError marks the specified error as non-retriable. +// This function takes an error as input and returns a new error that is marked as non-retriable. +func NonRetriableError(err error) error { + return &nonRetriableError{err} +} + +// nonRetriableError is a struct that embeds the error interface. +// It is used to represent errors that should not be retried. +type nonRetriableError struct { + error +} + +// Error method for nonRetriableError struct. +// It returns the error message of the embedded error. +func (p *nonRetriableError) Error() string { + return p.error.Error() +} + +// NonRetriable is a marker method for nonRetriableError struct. +// Non-functional and indicates that the error is non-retriable. +func (*nonRetriableError) NonRetriable() { + // marker method +} + +// Unwrap method for nonRetriableError struct. +// It returns the original error that was marked as non-retriable. 
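A sketch of marking an error non-retriable and detecting that mark later via errors.As; the wrapper function names are placeholders and, as above, the package is internal to the SDK module:

    package example

    import (
        "errors"

        "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
    )

    // markFatal flags an error so a retry policy will not attempt it again.
    func markFatal(err error) error {
        return errorinfo.NonRetriableError(err)
    }

    // isFatal reports whether err, or anything it wraps, was marked non-retriable.
    func isFatal(err error) bool {
        var nr errorinfo.NonRetriable
        return errors.As(err, &nr)
    }
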
+func (p *nonRetriableError) Unwrap() error { + return p.error +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go new file mode 100644 index 0000000000..9948f604b3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go @@ -0,0 +1,129 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "errors" + "io" + "net/http" +) + +// HasStatusCode returns true if the Response's status code is one of the specified values. +// Exported as runtime.HasStatusCode(). +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + if resp == nil { + return false + } + for _, sc := range statusCodes { + if resp.StatusCode == sc { + return true + } + } + return false +} + +// PayloadOptions contains the optional values for the Payload func. +// NOT exported but used by azcore. +type PayloadOptions struct { + // BytesModifier receives the downloaded byte slice and returns an updated byte slice. + // Use this to modify the downloaded bytes in a payload (e.g. removing a BOM). + BytesModifier func([]byte) []byte +} + +// Payload reads and returns the response body or an error. +// On a successful read, the response body is cached. +// Subsequent reads will access the cached value. +// Exported as runtime.Payload() WITHOUT the opts parameter. +func Payload(resp *http.Response, opts *PayloadOptions) ([]byte, error) { + if resp.Body == nil { + // this shouldn't happen in real-world scenarios as a + // response with no body should set it to http.NoBody + return nil, nil + } + modifyBytes := func(b []byte) []byte { return b } + if opts != nil && opts.BytesModifier != nil { + modifyBytes = opts.BytesModifier + } + + // r.Body won't be a nopClosingBytesReader if downloading was skipped + if buf, ok := resp.Body.(*nopClosingBytesReader); ok { + bytesBody := modifyBytes(buf.Bytes()) + buf.Set(bytesBody) + return bytesBody, nil + } + + bytesBody, err := io.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, err + } + + bytesBody = modifyBytes(bytesBody) + resp.Body = &nopClosingBytesReader{s: bytesBody} + return bytesBody, nil +} + +// PayloadDownloaded returns true if the response body has already been downloaded. +// This implies that the Payload() func above has been previously called. +// NOT exported but used by azcore. +func PayloadDownloaded(resp *http.Response) bool { + _, ok := resp.Body.(*nopClosingBytesReader) + return ok +} + +// nopClosingBytesReader is an io.ReadSeekCloser around a byte slice. +// It also provides direct access to the byte slice to avoid rereading. +type nopClosingBytesReader struct { + s []byte + i int64 +} + +// Bytes returns the underlying byte slice. +func (r *nopClosingBytesReader) Bytes() []byte { + return r.s +} + +// Close implements the io.Closer interface. +func (*nopClosingBytesReader) Close() error { + return nil +} + +// Read implements the io.Reader interface. +func (r *nopClosingBytesReader) Read(b []byte) (n int, err error) { + if r.i >= int64(len(r.s)) { + return 0, io.EOF + } + n = copy(b, r.s[r.i:]) + r.i += int64(n) + return +} + +// Set replaces the existing byte slice with the specified byte slice and resets the reader. +func (r *nopClosingBytesReader) Set(b []byte) { + r.s = b + r.i = 0 +} + +// Seek implements the io.Seeker interface. 
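A sketch that makes the body-caching behavior of nopClosingBytesReader visible through the public runtime.Payload helper; the equality check exists only to illustrate that a second read sees the cached bytes:

    package example

    import (
        "bytes"
        "fmt"
        "net/http"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
    )

    func readPayloadTwice(resp *http.Response) error {
        first, err := runtime.Payload(resp)
        if err != nil {
            return err
        }
        // resp.Body has been replaced with an in-memory reader, so reading again
        // returns the same bytes instead of hitting an already-drained stream
        second, err := runtime.Payload(resp)
        if err != nil {
            return err
        }
        fmt.Println(bytes.Equal(first, second)) // true
        return nil
    }
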
+func (r *nopClosingBytesReader) Seek(offset int64, whence int) (int64, error) { + var i int64 + switch whence { + case io.SeekStart: + i = offset + case io.SeekCurrent: + i = r.i + offset + case io.SeekEnd: + i = int64(len(r.s)) + offset + default: + return 0, errors.New("nopClosingBytesReader: invalid whence") + } + if i < 0 { + return 0, errors.New("nopClosingBytesReader: negative position") + } + r.i = i + return i, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go new file mode 100644 index 0000000000..d7876d297a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package log diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go new file mode 100644 index 0000000000..4f1dcf1b78 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go @@ -0,0 +1,104 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package log + +import ( + "fmt" + "os" + "time" +) + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// NOTE: The following are exported as public surface area from azcore. DO NOT MODIFY +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// Event is used to group entries. Each group can be toggled on or off. +type Event string + +// SetEvents is used to control which events are written to +// the log. By default all log events are writen. +func SetEvents(cls ...Event) { + log.cls = cls +} + +// SetListener will set the Logger to write to the specified listener. +func SetListener(lst func(Event, string)) { + log.lst = lst +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// END PUBLIC SURFACE AREA +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// Should returns true if the specified log event should be written to the log. +// By default all log events will be logged. Call SetEvents() to limit +// the log events for logging. +// If no listener has been set this will return false. +// Calling this method is useful when the message to log is computationally expensive +// and you want to avoid the overhead if its log event is not enabled. +func Should(cls Event) bool { + if log.lst == nil { + return false + } + if log.cls == nil || len(log.cls) == 0 { + return true + } + for _, c := range log.cls { + if c == cls { + return true + } + } + return false +} + +// Write invokes the underlying listener with the specified event and message. +// If the event shouldn't be logged or there is no listener then Write does nothing. +func Write(cls Event, message string) { + if !Should(cls) { + return + } + log.lst(cls, message) +} + +// Writef invokes the underlying listener with the specified event and formatted message. +// If the event shouldn't be logged or there is no listener then Writef does nothing. +func Writef(cls Event, format string, a ...interface{}) { + if !Should(cls) { + return + } + log.lst(cls, fmt.Sprintf(format, a...)) +} + +// TestResetEvents is used for TESTING PURPOSES ONLY. 
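A sketch of wiring a listener and filtering by event with this internal logging package; the event name is hypothetical and exists only for this illustration:

    package example

    import (
        "fmt"
        "os"

        "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
    )

    // evExample is a hypothetical event used only for illustration; Event is just a string type.
    const evExample = log.Event("ExampleEvent")

    func enableExampleLogging() {
        log.SetListener(func(cls log.Event, msg string) {
            fmt.Fprintf(os.Stderr, "[%s] %s\n", cls, msg)
        })
        // restrict output to evExample entries; omitting SetEvents logs everything
        log.SetEvents(evExample)

        if log.Should(evExample) {
            log.Writef(evExample, "listener configured, pid %d", os.Getpid())
        }
    }
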
+func TestResetEvents() { + log.cls = nil +} + +// logger controls which events to log and writing to the underlying log. +type logger struct { + cls []Event + lst func(Event, string) +} + +// the process-wide logger +var log logger + +func init() { + initLogging() +} + +// split out for testing purposes +func initLogging() { + if cls := os.Getenv("AZURE_SDK_GO_LOGGING"); cls == "all" { + // cls could be enhanced to support a comma-delimited list of log events + log.lst = func(cls Event, msg string) { + // simple console logger, it writes to stderr in the following format: + // [time-stamp] Event: message + fmt.Fprintf(os.Stderr, "[%s] %s: %s\n", time.Now().Format(time.StampMicro), cls, msg) + } + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go new file mode 100644 index 0000000000..db8269627d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go @@ -0,0 +1,155 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package poller + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" +) + +// the well-known set of LRO status/provisioning state values. +const ( + StatusSucceeded = "Succeeded" + StatusCanceled = "Canceled" + StatusFailed = "Failed" + StatusInProgress = "InProgress" +) + +// these are non-conformant states that we've seen in the wild. +// we support them for back-compat. +const ( + StatusCancelled = "Cancelled" + StatusCompleted = "Completed" +) + +// IsTerminalState returns true if the LRO's state is terminal. +func IsTerminalState(s string) bool { + return Failed(s) || Succeeded(s) +} + +// Failed returns true if the LRO's state is terminal failure. +func Failed(s string) bool { + return strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled) || strings.EqualFold(s, StatusCancelled) +} + +// Succeeded returns true if the LRO's state is terminal success. +func Succeeded(s string) bool { + return strings.EqualFold(s, StatusSucceeded) || strings.EqualFold(s, StatusCompleted) +} + +// returns true if the LRO response contains a valid HTTP status code +func StatusCodeValid(resp *http.Response) bool { + return exported.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusCreated, http.StatusNoContent) +} + +// IsValidURL verifies that the URL is valid and absolute. +func IsValidURL(s string) bool { + u, err := url.Parse(s) + return err == nil && u.IsAbs() +} + +// ErrNoBody is returned if the response didn't contain a body. +var ErrNoBody = errors.New("the response did not contain a body") + +// GetJSON reads the response body into a raw JSON object. +// It returns ErrNoBody if there was no content. +func GetJSON(resp *http.Response) (map[string]any, error) { + body, err := exported.Payload(resp, nil) + if err != nil { + return nil, err + } + if len(body) == 0 { + return nil, ErrNoBody + } + // unmarshall the body to get the value + var jsonBody map[string]any + if err = json.Unmarshal(body, &jsonBody); err != nil { + return nil, err + } + return jsonBody, nil +} + +// provisioningState returns the provisioning state from the response or the empty string. 
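A sketch of how these state helpers combine while polling an ARM-style LRO (GetProvisioningState appears just below); the error message and the keep-polling policy are illustrative:

    package example

    import (
        "errors"
        "net/http"

        "github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
    )

    // lroFinished reports whether the polled response describes a terminal LRO state.
    func lroFinished(resp *http.Response) (bool, error) {
        if !poller.StatusCodeValid(resp) {
            return false, errors.New("polling returned an unexpected status code")
        }
        state, err := poller.GetProvisioningState(resp)
        if errors.Is(err, poller.ErrNoBody) {
            return false, nil // no body yet, keep polling
        }
        if err != nil {
            return false, err
        }
        return poller.IsTerminalState(state), nil
    }
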
+func provisioningState(jsonBody map[string]any) string { + jsonProps, ok := jsonBody["properties"] + if !ok { + return "" + } + props, ok := jsonProps.(map[string]any) + if !ok { + return "" + } + rawPs, ok := props["provisioningState"] + if !ok { + return "" + } + ps, ok := rawPs.(string) + if !ok { + return "" + } + return ps +} + +// status returns the status from the response or the empty string. +func status(jsonBody map[string]any) string { + rawStatus, ok := jsonBody["status"] + if !ok { + return "" + } + status, ok := rawStatus.(string) + if !ok { + return "" + } + return status +} + +// GetStatus returns the LRO's status from the response body. +// Typically used for Azure-AsyncOperation flows. +// If there is no status in the response body the empty string is returned. +func GetStatus(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + return status(jsonBody), nil +} + +// GetProvisioningState returns the LRO's state from the response body. +// If there is no state in the response body the empty string is returned. +func GetProvisioningState(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + return provisioningState(jsonBody), nil +} + +// GetResourceLocation returns the LRO's resourceLocation value from the response body. +// Typically used for Operation-Location flows. +// If there is no resourceLocation in the response body the empty string is returned. +func GetResourceLocation(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + v, ok := jsonBody["resourceLocation"] + if !ok { + // it might be ok if the field doesn't exist, the caller must make that determination + return "", nil + } + vv, ok := v.(string) + if !ok { + return "", fmt.Errorf("the resourceLocation value %v was not in string format", v) + } + return vv, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go new file mode 100644 index 0000000000..238ef42ed0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go @@ -0,0 +1,123 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package temporal + +import ( + "sync" + "time" +) + +// AcquireResource abstracts a method for refreshing a temporal resource. +type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error) + +// Resource is a temporal resource (usually a credential) that requires periodic refreshing. 
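A sketch of the intended usage of this generic cache via NewResource and Get, which are defined just below; the token type, scope value, and one-hour lifetime are placeholders:

    package example

    import (
        "time"

        "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal"
    )

    type exampleToken struct{ value string }

    // acquireToken runs only when the cached value is missing or close to expiry.
    func acquireToken(scope string) (exampleToken, time.Time, error) {
        return exampleToken{value: "token-for-" + scope}, time.Now().Add(time.Hour), nil
    }

    var tokenCache = temporal.NewResource[exampleToken, string](acquireToken)

    func currentToken() (exampleToken, error) {
        // the first call acquires; later calls reuse the cache until it nears expiry
        return tokenCache.Get("example-scope")
    }
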
+type Resource[TResource, TState any] struct { + // cond is used to synchronize access to the shared resource embodied by the remaining fields + cond *sync.Cond + + // acquiring indicates that some thread/goroutine is in the process of acquiring/updating the resource + acquiring bool + + // resource contains the value of the shared resource + resource TResource + + // expiration indicates when the shared resource expires; it is 0 if the resource was never acquired + expiration time.Time + + // lastAttempt indicates when a thread/goroutine last attempted to acquire/update the resource + lastAttempt time.Time + + // acquireResource is the callback function that actually acquires the resource + acquireResource AcquireResource[TResource, TState] +} + +// NewResource creates a new Resource that uses the specified AcquireResource for refreshing. +func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] { + return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar} +} + +// Get returns the underlying resource. +// If the resource is fresh, no refresh is performed. +func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) { + // If the resource is expiring within this time window, update it eagerly. + // This allows other threads/goroutines to keep running by using the not-yet-expired + // resource value while one thread/goroutine updates the resource. + const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration + const backoff = 30 * time.Second // Minimum wait time between eager update attempts + + now, acquire, expired := time.Now(), false, false + + // acquire exclusive lock + er.cond.L.Lock() + resource := er.resource + + for { + expired = er.expiration.IsZero() || er.expiration.Before(now) + if expired { + // The resource was never acquired or has expired + if !er.acquiring { + // If another thread/goroutine is not acquiring/updating the resource, this thread/goroutine will do it + er.acquiring, acquire = true, true + break + } + // Getting here means that this thread/goroutine will wait for the updated resource + } else if er.expiration.Add(-window).Before(now) { + // The resource is valid but is expiring within the time window + if !er.acquiring && er.lastAttempt.Add(backoff).Before(now) { + // If another thread/goroutine is not acquiring/renewing the resource, and none has attempted + // to do so within the last 30 seconds, this thread/goroutine will do it + er.acquiring, acquire = true, true + break + } + // This thread/goroutine will use the existing resource value while another updates it + resource = er.resource + break + } else { + // The resource is not close to expiring, this thread/goroutine should use its current value + resource = er.resource + break + } + // If we get here, wait for the new resource value to be acquired/updated + er.cond.Wait() + } + er.cond.L.Unlock() // Release the lock so no threads/goroutines are blocked + + var err error + if acquire { + // This thread/goroutine has been selected to acquire/update the resource + var expiration time.Time + var newValue TResource + er.lastAttempt = now + newValue, expiration, err = er.acquireResource(state) + + // Atomically, update the shared resource's new value & expiration. 
+ er.cond.L.Lock() + if err == nil { + // Update resource & expiration, return the new value + resource = newValue + er.resource, er.expiration = resource, expiration + } else if !expired { + // An eager update failed. Discard the error and return the current--still valid--resource value + err = nil + } + er.acquiring = false // Indicate that no thread/goroutine is currently acquiring the resource + + // Wake up any waiting threads/goroutines since there is a resource they can ALL use + er.cond.L.Unlock() + er.cond.Broadcast() + } + return resource, err // Return the resource this thread/goroutine can use +} + +// Expire marks the resource as expired, ensuring it's refreshed on the next call to Get(). +func (er *Resource[TResource, TState]) Expire() { + er.cond.L.Lock() + defer er.cond.L.Unlock() + + // Reset the expiration as if we never got this resource to begin with + er.expiration = time.Time{} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go new file mode 100644 index 0000000000..a3824bee8b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package uuid diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go new file mode 100644 index 0000000000..278ac9cd1c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go @@ -0,0 +1,76 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package uuid + +import ( + "crypto/rand" + "errors" + "fmt" + "strconv" +) + +// The UUID reserved variants. +const ( + reservedRFC4122 byte = 0x40 +) + +// A UUID representation compliant with specification in RFC4122 document. +type UUID [16]byte + +// New returns a new UUID using the RFC4122 algorithm. +func New() (UUID, error) { + u := UUID{} + // Set all bits to pseudo-random values. + // NOTE: this takes a process-wide lock + _, err := rand.Read(u[:]) + if err != nil { + return u, err + } + u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122) + + var version byte = 4 + u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4) + return u, nil +} + +// String returns the UUID in "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" format. +func (u UUID) String() string { + return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +// Parse parses a string formatted as "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +// or "{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" into a UUID. 
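A quick sketch of the round trip between New, String, and Parse (Parse is defined just below); the printed comparison only demonstrates that UUID values are directly comparable:

    package example

    import (
        "fmt"

        "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
    )

    func uuidRoundTrip() error {
        u, err := uuid.New()
        if err != nil {
            return err
        }
        parsed, err := uuid.Parse(u.String())
        if err != nil {
            return err
        }
        // UUID is a [16]byte array, so values compare directly
        fmt.Println(parsed == u) // true
        return nil
    }
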
+func Parse(s string) (UUID, error) { + var uuid UUID + // ensure format + switch len(s) { + case 36: + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 38: + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + s = s[1:37] + default: + return uuid, errors.New("invalid UUID format") + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + // parse chunks + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + b, err := strconv.ParseUint(s[x:x+2], 16, 8) + if err != nil { + return uuid, fmt.Errorf("invalid UUID format: %s", err) + } + uuid[i] = byte(b) + } + return uuid, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/LICENSE.txt new file mode 100644 index 0000000000..d1ca00f20a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/LICENSE.txt @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go new file mode 100644 index 0000000000..ec6907fbdc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go @@ -0,0 +1,472 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "context" + "io" + "os" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// ClientOptions contains the optional parameters when creating a Client. 
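A sketch of constructing a Client with one of the constructors defined below; the SAS URL is a placeholder and nil accepts the default ClientOptions:

    package example

    import (
        "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
    )

    func newBlobClient() (*blob.Client, error) {
        // anonymous or SAS-based access; the query string would carry the SAS token
        return blob.NewClientWithNoCredential(
            "https://myaccount.blob.core.windows.net/container/blob.txt?<sas-token>",
            nil,
        )
    }
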
+type ClientOptions base.ClientOptions + +// Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. +type Client base.Client[generated.BlobClient] + +// NewClient creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + audience := base.GetAudience((*base.ClientOptions)(options)) + conOptions := shared.GetClientOptions(options) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} + + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewBlobClient(blobURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a blob or with a shared access signature (SAS) token. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt? +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewBlobClient(blobURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt +// - cred - a SharedKeyCredential created with the matching blob's storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} + + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewBlobClient(blobURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. 
+// - connectionString - a connection string for the desired storage account +// - containerName - the name of the container within the storage account +// - blobName - the name of the blob within the container +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName) + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (b *Client) generated() *generated.BlobClient { + return base.InnerClient((*base.Client[generated.BlobClient])(b)) +} + +func (b *Client) sharedKey() *SharedKeyCredential { + return base.SharedKey((*base.Client[generated.BlobClient])(b)) +} + +func (b *Client) credential() any { + return base.Credential((*base.Client[generated.BlobClient])(b)) +} + +func (b *Client) getClientOptions() *base.ClientOptions { + return base.GetClientOptions((*base.Client[generated.BlobClient])(b)) +} + +// URL returns the URL endpoint used by the Client object. +func (b *Client) URL() string { + return b.generated().Endpoint() +} + +// WithSnapshot creates a new Client object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (b *Client) WithSnapshot(snapshot string) (*Client, error) { + p, err := ParseURL(b.URL()) + if err != nil { + return nil, err + } + p.Snapshot = snapshot + + return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential(), b.getClientOptions())), nil +} + +// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. +// Pass "" to remove the versionID returning a URL to the base blob. +func (b *Client) WithVersionID(versionID string) (*Client, error) { + p, err := ParseURL(b.URL()) + if err != nil { + return nil, err + } + p.VersionID = versionID + + return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential(), b.getClientOptions())), nil +} + +// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. +// Note that deleting a blob also deletes all its snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. +func (b *Client) Delete(ctx context.Context, o *DeleteOptions) (DeleteResponse, error) { + deleteOptions, leaseInfo, accessConditions := o.format() + resp, err := b.generated().Delete(ctx, deleteOptions, leaseInfo, accessConditions) + return resp, err +} + +// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. +func (b *Client) Undelete(ctx context.Context, o *UndeleteOptions) (UndeleteResponse, error) { + undeleteOptions := o.format() + resp, err := b.generated().Undelete(ctx, undeleteOptions) + return resp, err +} + +// SetTier operation sets the tier on a blob. 
The operation is allowed on a page +// blob in a premium storage account and on a block blob in a blob storage account (locally +// redundant storage only). A premium page blob's tier determines the allowed size, IOPs, and +// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation +// does not update the blob's ETag. +// For detailed information about block blob level tiers see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +func (b *Client) SetTier(ctx context.Context, tier AccessTier, o *SetTierOptions) (SetTierResponse, error) { + opts, leaseAccessConditions, modifiedAccessConditions := o.format() + resp, err := b.generated().SetTier(ctx, tier, opts, leaseAccessConditions, modifiedAccessConditions) + return resp, err +} + +// GetProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. +func (b *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) { + opts, leaseAccessConditions, cpkInfo, modifiedAccessConditions := options.format() + resp, err := b.generated().GetProperties(ctx, opts, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + return resp, err +} + +// SetHTTPHeaders changes a blob's HTTP headers. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (b *Client) SetHTTPHeaders(ctx context.Context, httpHeaders HTTPHeaders, o *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error) { + opts, leaseAccessConditions, modifiedAccessConditions := o.format() + resp, err := b.generated().SetHTTPHeaders(ctx, opts, &httpHeaders, leaseAccessConditions, modifiedAccessConditions) + return resp, err +} + +// SetMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. +func (b *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *SetMetadataOptions) (SetMetadataResponse, error) { + basics := generated.BlobClientSetMetadataOptions{Metadata: metadata} + leaseAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions := o.format() + resp, err := b.generated().SetMetadata(ctx, &basics, leaseAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions) + return resp, err +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. +func (b *Client) CreateSnapshot(ctx context.Context, options *CreateSnapshotOptions) (CreateSnapshotResponse, error) { + // CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter + // because checking this would be a performance hit for a VERY unusual path, and we don't think the common case should suffer this + // performance hit. + opts, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := options.format() + resp, err := b.generated().CreateSnapshot(ctx, opts, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions) + + return resp, err +} + +// StartCopyFromURL copies the data at the source URL to a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. 
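A sketch of the property, metadata, and snapshot methods above, assuming a *blob.Client built as in the earlier sketches; the Snapshot field on the response is assumed from the generated response type rather than shown in this diff.

package blobexamples

import (
    "context"
    "fmt"

    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func inspectAndSnapshot(ctx context.Context, client *blob.Client) error {
    // Read the current properties; numeric and string fields come back as pointers.
    props, err := client.GetProperties(ctx, nil)
    if err != nil {
        return err
    }
    fmt.Printf("size: %d bytes\n", *props.ContentLength)

    // Replace the blob's metadata; values are *string in this API surface.
    owner := "build-team"
    if _, err := client.SetMetadata(ctx, map[string]*string{"owner": &owner}, nil); err != nil {
        return err
    }

    // Take a read-only snapshot of the current state.
    snap, err := client.CreateSnapshot(ctx, nil)
    if err != nil {
        return err
    }
    fmt.Println("snapshot:", *snap.Snapshot) // Snapshot field assumed on the generated response
    return nil
}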
+func (b *Client) StartCopyFromURL(ctx context.Context, copySource string, options *StartCopyFromURLOptions) (StartCopyFromURLResponse, error) { + opts, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions := options.format() + resp, err := b.generated().StartCopyFromURL(ctx, copySource, opts, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) + return resp, err +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. +func (b *Client) AbortCopyFromURL(ctx context.Context, copyID string, options *AbortCopyFromURLOptions) (AbortCopyFromURLResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := b.generated().AbortCopyFromURL(ctx, copyID, opts, leaseAccessConditions) + return resp, err +} + +// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. +// Each call to this operation replaces all existing tags attached to the blob. +// To remove all tags from the blob, call this operation with no tags set. +// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags +func (b *Client) SetTags(ctx context.Context, tags map[string]string, options *SetTagsOptions) (SetTagsResponse, error) { + serializedTags := shared.SerializeBlobTags(tags) + blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format() + resp, err := b.generated().SetTags(ctx, *serializedTags, blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions) + return resp, err +} + +// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. +// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags +func (b *Client) GetTags(ctx context.Context, options *GetTagsOptions) (GetTagsResponse, error) { + blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format() + resp, err := b.generated().GetTags(ctx, blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions) + return resp, err + +} + +// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob. Mode defaults to "Unlocked". +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (b *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *SetImmutabilityPolicyOptions) (SetImmutabilityPolicyResponse, error) { + blobSetImmutabilityPolicyOptions, modifiedAccessConditions := options.format() + blobSetImmutabilityPolicyOptions.ImmutabilityPolicyExpiry = &expiryTime + resp, err := b.generated().SetImmutabilityPolicy(ctx, blobSetImmutabilityPolicyOptions, modifiedAccessConditions) + return resp, err +} + +// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (b *Client) DeleteImmutabilityPolicy(ctx context.Context, options *DeleteImmutabilityPolicyOptions) (DeleteImmutabilityPolicyResponse, error) { + deleteImmutabilityOptions := options.format() + resp, err := b.generated().DeleteImmutabilityPolicy(ctx, deleteImmutabilityOptions) + return resp, err +} + +// SetLegalHold operation enables users to set legal hold on a blob. 
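A sketch of an asynchronous copy with StartCopyFromURL above, polling GetProperties until it settles; the CopyID and CopyStatus response fields are assumed from the generated response types (they are not visible in this diff), and the source URL is supplied by the caller.

package blobexamples

import (
    "context"
    "fmt"
    "time"

    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func copyAndWait(ctx context.Context, dst *blob.Client, srcURL string) error {
    start, err := dst.StartCopyFromURL(ctx, srcURL, nil)
    if err != nil {
        return err
    }
    fmt.Println("copy id:", *start.CopyID) // CopyID assumed on the generated response

    for {
        props, err := dst.GetProperties(ctx, nil)
        if err != nil {
            return err
        }
        switch *props.CopyStatus { // CopyStatus assumed on the generated response
        case blob.CopyStatusTypeSuccess:
            return nil
        case blob.CopyStatusTypePending:
            time.Sleep(2 * time.Second)
        default:
            return fmt.Errorf("copy ended with status %s", *props.CopyStatus)
        }
    }
}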
+// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (b *Client) SetLegalHold(ctx context.Context, legalHold bool, options *SetLegalHoldOptions) (SetLegalHoldResponse, error) { + setLegalHoldOptions := options.format() + resp, err := b.generated().SetLegalHold(ctx, legalHold, setLegalHoldOptions) + return resp, err +} + +// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. +func (b *Client) CopyFromURL(ctx context.Context, copySource string, options *CopyFromURLOptions) (CopyFromURLResponse, error) { + copyOptions, smac, mac, lac, cpkScopeInfo := options.format() + resp, err := b.generated().CopyFromURL(ctx, copySource, copyOptions, smac, mac, lac, cpkScopeInfo) + return resp, err +} + +// GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. +func (b *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) { + getAccountInfoOptions := o.format() + resp, err := b.generated().GetAccountInfo(ctx, getAccountInfoOptions) + return resp, err +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (b *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { + if b.sharedKey() == nil { + return "", bloberror.MissingSharedKeyCredential + } + + urlParts, err := ParseURL(b.URL()) + if err != nil { + return "", err + } + + t, err := time.Parse(SnapshotTimeFormat, urlParts.Snapshot) + + if err != nil { + t = time.Time{} + } + st := o.format() + + qps, err := sas.BlobSignatureValues{ + ContainerName: urlParts.ContainerName, + BlobName: urlParts.BlobName, + SnapshotTime: t, + Version: sas.Version, + Permissions: permissions.String(), + StartTime: st, + ExpiryTime: expiry.UTC(), + }.SignWithSharedKey(b.sharedKey()) + + if err != nil { + return "", err + } + + endpoint := b.URL() + "?" + qps.Encode() + + return endpoint, nil +} + +// Concurrent Download Functions ----------------------------------------------------------------------------------------- + +// downloadBuffer downloads an Azure blob to a WriterAt in parallel. +func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downloadOptions) (int64, error) { + if o.BlockSize == 0 { + o.BlockSize = DefaultDownloadBlockSize + } + + count := o.Range.Count + if count == CountToEnd { // If size not specified, calculate it + // If we don't have the length at all, get it + gr, err := b.GetProperties(ctx, o.getBlobPropertiesOptions()) + if err != nil { + return 0, err + } + count = *gr.ContentLength - o.Range.Offset + } + + if count <= 0 { + // The file is empty, there is nothing to download. + return 0, nil + } + + // Prepare and do parallel download. 
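A sketch of GetSASURL above; it only succeeds when the client carries a SharedKeyCredential, the account name/key are placeholders, and the Read field on sas.BlobPermissions is assumed from the sas package referenced elsewhere in this diff.

package blobexamples

import (
    "time"

    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)

func readOnlySAS(blobURL, accountName, accountKey string) (string, error) {
    cred, err := blob.NewSharedKeyCredential(accountName, accountKey)
    if err != nil {
        return "", err
    }
    client, err := blob.NewClientWithSharedKeyCredential(blobURL, cred, nil)
    if err != nil {
        return "", err
    }
    // One-hour, read-only SAS scoped to this blob.
    return client.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(time.Hour), nil)
}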
+ progress := int64(0) + progressLock := &sync.Mutex{} + + err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ + OperationName: "downloadBlobToWriterAt", + TransferSize: count, + ChunkSize: o.BlockSize, + NumChunks: uint64(((count - 1) / o.BlockSize) + 1), + Concurrency: o.Concurrency, + Operation: func(ctx context.Context, chunkStart int64, count int64) error { + downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{ + Offset: chunkStart + o.Range.Offset, + Count: count, + }, nil) + dr, err := b.DownloadStream(ctx, downloadBlobOptions) + if err != nil { + return err + } + var body io.ReadCloser = dr.NewRetryReader(ctx, &o.RetryReaderOptionsPerBlock) + if o.Progress != nil { + rangeProgress := int64(0) + body = streaming.NewResponseProgress( + body, + func(bytesTransferred int64) { + diff := bytesTransferred - rangeProgress + rangeProgress = bytesTransferred + progressLock.Lock() + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + _, err = io.Copy(shared.NewSectionWriter(writer, chunkStart, count), body) + if err != nil { + return err + } + err = body.Close() + return err + }, + }) + if err != nil { + return 0, err + } + return count, nil +} + +// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. +func (b *Client) DownloadStream(ctx context.Context, o *DownloadStreamOptions) (DownloadStreamResponse, error) { + downloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions := o.format() + if o == nil { + o = &DownloadStreamOptions{} + } + + dr, err := b.generated().Download(ctx, downloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + if err != nil { + return DownloadStreamResponse{}, err + } + + return DownloadStreamResponse{ + client: b, + DownloadResponse: dr, + getInfo: httpGetterInfo{Range: o.Range, ETag: dr.ETag}, + ObjectReplicationRules: deserializeORSPolicies(dr.ObjectReplicationRules), + cpkInfo: o.CPKInfo, + cpkScope: o.CPKScopeInfo, + }, err +} + +// DownloadBuffer downloads an Azure blob to a buffer with parallel. +func (b *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *DownloadBufferOptions) (int64, error) { + if o == nil { + o = &DownloadBufferOptions{} + } + return b.downloadBuffer(ctx, shared.NewBytesWriter(buffer), (downloadOptions)(*o)) +} + +// DownloadFile downloads an Azure blob to a local file. +// The file would be truncated if the size doesn't match. +func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFileOptions) (int64, error) { + if o == nil { + o = &DownloadFileOptions{} + } + do := (*downloadOptions)(o) + + // 1. Calculate the size of the destination file + var size int64 + + count := do.Range.Count + if count == CountToEnd { + // Try to get Azure blob's size + getBlobPropertiesOptions := do.getBlobPropertiesOptions() + props, err := b.GetProperties(ctx, getBlobPropertiesOptions) + if err != nil { + return 0, err + } + size = *props.ContentLength - do.Range.Offset + do.Range.Count = size + } else { + size = count + } + + // 2. Compare and try to resize local file's size if it doesn't match Azure blob's size. 
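A sketch of DownloadStream above, wrapping the body in the retry reader (defined later in this diff) so a dropped connection is retried mid-read; nil options accept the defaults.

package blobexamples

import (
    "context"
    "io"

    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func readWholeBlob(ctx context.Context, client *blob.Client) ([]byte, error) {
    resp, err := client.DownloadStream(ctx, nil)
    if err != nil {
        return nil, err
    }
    // NewRetryReader re-issues ranged GETs if the connection drops part-way through.
    body := resp.NewRetryReader(ctx, nil)
    defer body.Close()
    return io.ReadAll(body)
}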
+ stat, err := file.Stat() + if err != nil { + return 0, err + } + if stat.Size() != size { + if err = file.Truncate(size); err != nil { + return 0, err + } + } + + if size > 0 { + return b.downloadBuffer(ctx, file, *do) + } else { // if the blob's size is 0, there is no need in downloading it + return 0, nil + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go new file mode 100644 index 0000000000..daef800ed0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go @@ -0,0 +1,235 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +const ( + CountToEnd = 0 + + SnapshotTimeFormat = exported.SnapshotTimeFormat + + // DefaultDownloadBlockSize is default block size + DefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB + + // DefaultConcurrency is the default number of blocks downloaded or uploaded in parallel + DefaultConcurrency = shared.DefaultConcurrency +) + +// BlobType defines values for BlobType +type BlobType = generated.BlobType + +const ( + BlobTypeBlockBlob BlobType = generated.BlobTypeBlockBlob + BlobTypePageBlob BlobType = generated.BlobTypePageBlob + BlobTypeAppendBlob BlobType = generated.BlobTypeAppendBlob +) + +// PossibleBlobTypeValues returns the possible values for the BlobType const type. +func PossibleBlobTypeValues() []BlobType { + return generated.PossibleBlobTypeValues() +} + +// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType +type DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionType + +const ( + DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeInclude + DeleteSnapshotsOptionTypeOnly DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeOnly +) + +// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type. +func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { + return generated.PossibleDeleteSnapshotsOptionTypeValues() +} + +// AccessTier defines values for Blob Access Tier. 
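A sketch of DownloadFile above with an explicit block size and download concurrency; the local path is supplied by the caller.

package blobexamples

import (
    "context"
    "fmt"
    "os"

    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func downloadToDisk(ctx context.Context, client *blob.Client, path string) error {
    f, err := os.Create(path)
    if err != nil {
        return err
    }
    defer f.Close()

    n, err := client.DownloadFile(ctx, f, &blob.DownloadFileOptions{
        BlockSize:   blob.DefaultDownloadBlockSize, // 4 MB blocks
        Concurrency: 4,                             // four blocks in flight at a time
    })
    if err != nil {
        return err
    }
    fmt.Printf("downloaded %d bytes to %s\n", n, path)
    return nil
}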
+type AccessTier = generated.AccessTier + +const ( + AccessTierArchive AccessTier = generated.AccessTierArchive + AccessTierCool AccessTier = generated.AccessTierCool + AccessTierCold AccessTier = generated.AccessTierCold + AccessTierHot AccessTier = generated.AccessTierHot + AccessTierP10 AccessTier = generated.AccessTierP10 + AccessTierP15 AccessTier = generated.AccessTierP15 + AccessTierP20 AccessTier = generated.AccessTierP20 + AccessTierP30 AccessTier = generated.AccessTierP30 + AccessTierP4 AccessTier = generated.AccessTierP4 + AccessTierP40 AccessTier = generated.AccessTierP40 + AccessTierP50 AccessTier = generated.AccessTierP50 + AccessTierP6 AccessTier = generated.AccessTierP6 + AccessTierP60 AccessTier = generated.AccessTierP60 + AccessTierP70 AccessTier = generated.AccessTierP70 + AccessTierP80 AccessTier = generated.AccessTierP80 + AccessTierPremium AccessTier = generated.AccessTierPremium +) + +// PossibleAccessTierValues returns the possible values for the AccessTier const type. +func PossibleAccessTierValues() []AccessTier { + return generated.PossibleAccessTierValues() +} + +// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate. +// Valid values are High and Standard. +type RehydratePriority = generated.RehydratePriority + +const ( + RehydratePriorityHigh RehydratePriority = generated.RehydratePriorityHigh + RehydratePriorityStandard RehydratePriority = generated.RehydratePriorityStandard +) + +// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type. +func PossibleRehydratePriorityValues() []RehydratePriority { + return generated.PossibleRehydratePriorityValues() +} + +// ImmutabilityPolicyMode defines values for ImmutabilityPolicyMode +type ImmutabilityPolicyMode = generated.ImmutabilityPolicyMode + +const ( + ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeMutable + ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeUnlocked + ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeLocked +) + +// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type. +func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode { + return generated.PossibleImmutabilityPolicyModeValues() +} + +// ImmutabilityPolicySetting returns the possible values for the ImmutabilityPolicySetting const type. +type ImmutabilityPolicySetting = generated.ImmutabilityPolicySetting + +const ( + ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = generated.ImmutabilityPolicySettingUnlocked + ImmutabilityPolicySettingLocked ImmutabilityPolicySetting = generated.ImmutabilityPolicySettingLocked +) + +// PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type. 
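A sketch of rehydrating an archived blob using the tier and rehydrate-priority constants above; the service performs the rehydration asynchronously, so this only starts it.

package blobexamples

import (
    "context"

    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func rehydrateToHot(ctx context.Context, client *blob.Client) error {
    // Move an Archive-tier blob back to Hot at Standard priority.
    priority := blob.RehydratePriorityStandard
    _, err := client.SetTier(ctx, blob.AccessTierHot, &blob.SetTierOptions{
        RehydratePriority: &priority,
    })
    return err
}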
+func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting { + return generated.PossibleImmutabilityPolicySettingValues() +} + +// CopyStatusType defines values for CopyStatusType +type CopyStatusType = generated.CopyStatusType + +const ( + CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending + CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess + CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted + CopyStatusTypeFailed CopyStatusType = generated.CopyStatusTypeFailed +) + +// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return generated.PossibleCopyStatusTypeValues() +} + +// EncryptionAlgorithmType defines values for EncryptionAlgorithmType. +type EncryptionAlgorithmType = generated.EncryptionAlgorithmType + +const ( + EncryptionAlgorithmTypeNone EncryptionAlgorithmType = generated.EncryptionAlgorithmTypeNone + EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = generated.EncryptionAlgorithmTypeAES256 +) + +// PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type. +func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { + return generated.PossibleEncryptionAlgorithmTypeValues() +} + +// ArchiveStatus defines values for ArchiveStatus. +type ArchiveStatus = generated.ArchiveStatus + +const ( + ArchiveStatusRehydratePendingToCool ArchiveStatus = generated.ArchiveStatusRehydratePendingToCool + ArchiveStatusRehydratePendingToHot ArchiveStatus = generated.ArchiveStatusRehydratePendingToHot + ArchiveStatusRehydratePendingToCold ArchiveStatus = generated.ArchiveStatusRehydratePendingToCold +) + +// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. +func PossibleArchiveStatusValues() []ArchiveStatus { + return generated.PossibleArchiveStatusValues() +} + +// DeleteType defines values for DeleteType. +type DeleteType = generated.DeleteType + +const ( + DeleteTypeNone DeleteType = generated.DeleteTypeNone + DeleteTypePermanent DeleteType = generated.DeleteTypePermanent +) + +// PossibleDeleteTypeValues returns the possible values for the DeleteType const type. +func PossibleDeleteTypeValues() []DeleteType { + return generated.PossibleDeleteTypeValues() +} + +// QueryFormatType - The quick query format type. +type QueryFormatType = generated.QueryFormatType + +const ( + QueryFormatTypeDelimited QueryFormatType = generated.QueryFormatTypeDelimited + QueryFormatTypeJSON QueryFormatType = generated.QueryFormatTypeJSON + QueryFormatTypeArrow QueryFormatType = generated.QueryFormatTypeArrow + QueryFormatTypeParquet QueryFormatType = generated.QueryFormatTypeParquet +) + +// PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type. +func PossibleQueryFormatTypeValues() []QueryFormatType { + return generated.PossibleQueryFormatTypeValues() +} + +// TransferValidationType abstracts the various mechanisms used to verify a transfer. +type TransferValidationType = exported.TransferValidationType + +// TransferValidationTypeCRC64 is a TransferValidationType used to provide a precomputed CRC64. +type TransferValidationTypeCRC64 = exported.TransferValidationTypeCRC64 + +// TransferValidationTypeComputeCRC64 is a TransferValidationType that indicates a CRC64 should be computed during transfer. 
+func TransferValidationTypeComputeCRC64() TransferValidationType { + return exported.TransferValidationTypeComputeCRC64() +} + +// TransferValidationTypeMD5 is a TransferValidationType used to provide a precomputed MD5. +type TransferValidationTypeMD5 = exported.TransferValidationTypeMD5 + +// SourceContentValidationType abstracts the various mechanisms used to validate source content. +// This interface is not publicly implementable. +type SourceContentValidationType interface { + Apply(generated.SourceContentSetter) + notPubliclyImplementable() +} + +// SourceContentValidationTypeCRC64 is a SourceContentValidationType used to provide a precomputed CRC64. +type SourceContentValidationTypeCRC64 []byte + +// Apply implements the SourceContentValidationType interface for type SourceContentValidationTypeCRC64. +func (s SourceContentValidationTypeCRC64) Apply(src generated.SourceContentSetter) { + src.SetSourceContentCRC64(s) +} + +func (SourceContentValidationTypeCRC64) notPubliclyImplementable() {} + +var _ SourceContentValidationType = (SourceContentValidationTypeCRC64)(nil) + +// SourceContentValidationTypeMD5 is a SourceContentValidationType used to provide a precomputed MD5. +type SourceContentValidationTypeMD5 []byte + +// Apply implements the SourceContentValidationType interface for type SourceContentValidationTypeMD5. +func (s SourceContentValidationTypeMD5) Apply(src generated.SourceContentSetter) { + src.SetSourceContentMD5(s) +} + +func (SourceContentValidationTypeMD5) notPubliclyImplementable() {} + +var _ SourceContentValidationType = (SourceContentValidationTypeMD5)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go new file mode 100644 index 0000000000..d733468894 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go @@ -0,0 +1,580 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// Type Declarations --------------------------------------------------------------------- + +// AccessConditions identifies blob-specific access conditions which you optionally set. +type AccessConditions = exported.BlobAccessConditions + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = exported.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. +type ModifiedAccessConditions = exported.ModifiedAccessConditions + +// CPKInfo contains a group of parameters for client provided encryption key. 
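A sketch of optimistic concurrency with the access-condition aliases above: read the current ETag, then make a write conditional on it. The ETag field on the GetProperties response is assumed from the generated types, as elsewhere in this SDK.

package blobexamples

import (
    "context"

    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func setMetadataIfUnchanged(ctx context.Context, client *blob.Client) error {
    props, err := client.GetProperties(ctx, nil)
    if err != nil {
        return err
    }

    reviewed := "true"
    _, err = client.SetMetadata(ctx, map[string]*string{"reviewed": &reviewed}, &blob.SetMetadataOptions{
        AccessConditions: &blob.AccessConditions{
            ModifiedAccessConditions: &blob.ModifiedAccessConditions{
                IfMatch: props.ETag, // the service rejects the write if the blob changed since GetProperties
            },
        },
    })
    return err
}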
+type CPKInfo = generated.CPKInfo + +// CPKScopeInfo contains a group of parameters for client provided encryption scope. +type CPKScopeInfo = generated.CPKScopeInfo + +// HTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +type HTTPHeaders = generated.BlobHTTPHeaders + +// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. +type SourceModifiedAccessConditions = generated.SourceModifiedAccessConditions + +// Tags represent map of blob index tags +type Tags = generated.BlobTag + +// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and +// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange +// which has an offset and zero value count indicates from the offset to the resource's end. +type HTTPRange = exported.HTTPRange + +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// DownloadStreamOptions contains the optional parameters for the Client.Download method. +type DownloadStreamOptions struct { + // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + AccessConditions *AccessConditions + CPKInfo *CPKInfo + CPKScopeInfo *CPKScopeInfo +} + +func (o *DownloadStreamOptions) format() (*generated.BlobClientDownloadOptions, *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + basics := generated.BlobClientDownloadOptions{ + RangeGetContentMD5: o.RangeGetContentMD5, + Range: exported.FormatHTTPRange(o.Range), + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, leaseAccessConditions, o.CPKInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// downloadOptions contains common options used by the DownloadBuffer and DownloadFile functions. +type downloadOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + // BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are received. + Progress func(bytesTransferred int64) + + // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob. + AccessConditions *AccessConditions + + // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. + CPKInfo *CPKInfo + CPKScopeInfo *CPKScopeInfo + + // Concurrency indicates the maximum number of blocks to download in parallel (0=default). + Concurrency uint16 + + // RetryReaderOptionsPerBlock is used when downloading each block. 
+ RetryReaderOptionsPerBlock RetryReaderOptions +} + +func (o *downloadOptions) getBlobPropertiesOptions() *GetPropertiesOptions { + if o == nil { + return nil + } + return &GetPropertiesOptions{ + AccessConditions: o.AccessConditions, + CPKInfo: o.CPKInfo, + } +} + +func (o *downloadOptions) getDownloadBlobOptions(rnge HTTPRange, rangeGetContentMD5 *bool) *DownloadStreamOptions { + if o == nil { + return nil + } + return &DownloadStreamOptions{ + AccessConditions: o.AccessConditions, + CPKInfo: o.CPKInfo, + CPKScopeInfo: o.CPKScopeInfo, + Range: rnge, + RangeGetContentMD5: rangeGetContentMD5, + } +} + +// DownloadBufferOptions contains the optional parameters for the DownloadBuffer method. +type DownloadBufferOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + // BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are received. + Progress func(bytesTransferred int64) + + // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob. + AccessConditions *AccessConditions + + // CPKInfo contains a group of parameters for client provided encryption key. + CPKInfo *CPKInfo + + // CPKScopeInfo contains a group of parameters for client provided encryption scope. + CPKScopeInfo *CPKScopeInfo + + // Concurrency indicates the maximum number of blocks to download in parallel (0=default). + Concurrency uint16 + + // RetryReaderOptionsPerBlock is used when downloading each block. + RetryReaderOptionsPerBlock RetryReaderOptions +} + +// DownloadFileOptions contains the optional parameters for the DownloadFile method. +type DownloadFileOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + // BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are received. + Progress func(bytesTransferred int64) + + // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob. + AccessConditions *AccessConditions + + // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. + CPKInfo *CPKInfo + CPKScopeInfo *CPKScopeInfo + + // Concurrency indicates the maximum number of blocks to download in parallel. The default value is 5. + Concurrency uint16 + + // RetryReaderOptionsPerBlock is used when downloading each block. + RetryReaderOptionsPerBlock RetryReaderOptions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DeleteOptions contains the optional parameters for the Client.Delete method. +type DeleteOptions struct { + // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob + // and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself. + DeleteSnapshots *DeleteSnapshotsOptionType + AccessConditions *AccessConditions + // Setting DeleteType to DeleteTypePermanent will permanently delete soft-delete snapshot and/or version blobs. + // WARNING: This is a dangerous operation and should not be used unless you know the implications. Please proceed + // with caution. 
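A sketch of DownloadBuffer with the Range, Concurrency, and Progress options described above, pulling the first mebibyte of the blob into a pre-allocated buffer.

package blobexamples

import (
    "context"
    "fmt"

    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func downloadFirstMiB(ctx context.Context, client *blob.Client) ([]byte, error) {
    const size = 1024 * 1024
    buf := make([]byte, size)

    _, err := client.DownloadBuffer(ctx, buf, &blob.DownloadBufferOptions{
        Range:       blob.HTTPRange{Offset: 0, Count: size},
        Concurrency: 4,
        Progress: func(bytesTransferred int64) {
            fmt.Printf("downloaded %d of %d bytes\n", bytesTransferred, size)
        },
    })
    return buf, err
}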
+ // For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob + BlobDeleteType *DeleteType +} + +func (o *DeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := generated.BlobClientDeleteOptions{ + DeleteSnapshots: o.DeleteSnapshots, + DeleteType: o.BlobDeleteType, // None by default + } + + if o.AccessConditions == nil { + return &basics, nil, nil + } + + return &basics, o.AccessConditions.LeaseAccessConditions, o.AccessConditions.ModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UndeleteOptions contains the optional parameters for the Client.Undelete method. +type UndeleteOptions struct { + // placeholder for future options +} + +func (o *UndeleteOptions) format() *generated.BlobClientUndeleteOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetTierOptions contains the optional parameters for the Client.SetTier method. +type SetTierOptions struct { + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + + AccessConditions *AccessConditions +} + +func (o *SetTierOptions) format() (*generated.BlobClientSetTierOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &generated.BlobClientSetTierOptions{RehydratePriority: o.RehydratePriority}, leaseAccessConditions, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method +type GetPropertiesOptions struct { + AccessConditions *AccessConditions + CPKInfo *CPKInfo +} + +func (o *GetPropertiesOptions) format() (*generated.BlobClientGetPropertiesOptions, + *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return nil, leaseAccessConditions, o.CPKInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetHTTPHeadersOptions contains the optional parameters for the Client.SetHTTPHeaders method. 
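A sketch of the BlobDeleteType option above; permanently deleting soft-deleted snapshots or versions only works when the storage account has the feature enabled, so treat this as illustrative rather than a recommended default.

package blobexamples

import (
    "context"

    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func permanentlyDelete(ctx context.Context, client *blob.Client) error {
    deleteType := blob.DeleteTypePermanent
    _, err := client.Delete(ctx, &blob.DeleteOptions{
        BlobDeleteType: &deleteType, // permanently removes soft-deleted data instead of soft-deleting again
    })
    return err
}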
+type SetHTTPHeadersOptions struct { + AccessConditions *AccessConditions +} + +func (o *SetHTTPHeadersOptions) format() (*generated.BlobClientSetHTTPHeadersOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return nil, leaseAccessConditions, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetMetadataOptions provides set of configurations for Set Metadata on blob operation +type SetMetadataOptions struct { + AccessConditions *AccessConditions + CPKInfo *CPKInfo + CPKScopeInfo *CPKScopeInfo +} + +func (o *SetMetadataOptions) format() (*generated.LeaseAccessConditions, *CPKInfo, + *CPKScopeInfo, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CreateSnapshotOptions contains the optional parameters for the Client.CreateSnapshot method. +type CreateSnapshotOptions struct { + Metadata map[string]*string + AccessConditions *AccessConditions + CPKInfo *CPKInfo + CPKScopeInfo *CPKScopeInfo +} + +func (o *CreateSnapshotOptions) format() (*generated.BlobClientCreateSnapshotOptions, *generated.CPKInfo, + *generated.CPKScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + + return &generated.BlobClientCreateSnapshotOptions{ + Metadata: o.Metadata, + }, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions, leaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// StartCopyFromURLOptions contains the optional parameters for the Client.StartCopyFromURL method. +type StartCopyFromURLOptions struct { + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Used to set blob tags in various blob operations. + BlobTags map[string]string + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs + // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source + // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. + // See Naming and Referencing Containers, Blobs, and Metadata for more information. + Metadata map[string]*string + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + // Overrides the sealed state of the destination blob. 
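A sketch of updating a single HTTP header without clobbering the rest, combining SetHTTPHeaders above with the ParseHTTPHeaders helper declared later in this diff (utils.go).

package blobexamples

import (
    "context"

    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func setContentType(ctx context.Context, client *blob.Client, contentType string) error {
    props, err := client.GetProperties(ctx, nil)
    if err != nil {
        return err
    }

    // Start from the blob's current headers so the other fields are preserved.
    headers := blob.ParseHTTPHeaders(props)
    headers.BlobContentType = &contentType

    _, err = client.SetHTTPHeaders(ctx, headers, nil)
    return err
}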
Service version 2019-12-12 and newer. + SealBlob *bool + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + SourceModifiedAccessConditions *SourceModifiedAccessConditions + + AccessConditions *AccessConditions +} + +func (o *StartCopyFromURLOptions) format() (*generated.BlobClientStartCopyFromURLOptions, + *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + basics := generated.BlobClientStartCopyFromURLOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.BlobTags), + Metadata: o.Metadata, + RehydratePriority: o.RehydratePriority, + SealBlob: o.SealBlob, + Tier: o.Tier, + ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, + ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, + LegalHold: o.LegalHold, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// AbortCopyFromURLOptions contains the optional parameters for the Client.AbortCopyFromURL method. +type AbortCopyFromURLOptions struct { + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *AbortCopyFromURLOptions) format() (*generated.BlobClientAbortCopyFromURLOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + return nil, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetTagsOptions contains the optional parameters for the Client.SetTags method. +type SetTagsOptions struct { + // The version id parameter is an opaque DateTime value that, when present, + // specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + VersionID *string + // Optional header, Specifies the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Optional header, Specifies the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + AccessConditions *AccessConditions +} + +func (o *SetTagsOptions) format() (*generated.BlobClientSetTagsOptions, *ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil + } + + options := &generated.BlobClientSetTagsOptions{ + TransactionalContentMD5: o.TransactionalContentMD5, + TransactionalContentCRC64: o.TransactionalContentCRC64, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return options, modifiedAccessConditions, leaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetTagsOptions contains the optional parameters for the Client.GetTags method. +type GetTagsOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. + Snapshot *string + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. 
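A sketch of SetTags/GetTags above; the tag keys and values are illustrative, and every SetTags call replaces the blob's entire tag set.

package blobexamples

import (
    "context"

    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func replaceTags(ctx context.Context, client *blob.Client) error {
    // Each call replaces all existing tags on the blob.
    _, err := client.SetTags(ctx, map[string]string{
        "project": "builder-next",
        "stage":   "test",
    }, nil)
    if err != nil {
        return err
    }

    // Read them back; the generated response fields are omitted here.
    _, err = client.GetTags(ctx, nil)
    return err
}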
+ VersionID *string + + BlobAccessConditions *AccessConditions +} + +func (o *GetTagsOptions) format() (*generated.BlobClientGetTagsOptions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil + } + + options := &generated.BlobClientGetTagsOptions{ + Snapshot: o.Snapshot, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.BlobAccessConditions) + return options, modifiedAccessConditions, leaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetImmutabilityPolicyOptions contains the parameter for Client.SetImmutabilityPolicy +type SetImmutabilityPolicyOptions struct { + // Specifies the immutability policy mode to set on the blob. Possible values to set include: "Locked", "Unlocked". + // "Mutable" can only be returned by service, don't set to "Mutable". If mode is not set - it will default to Unlocked. + Mode *ImmutabilityPolicySetting + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *SetImmutabilityPolicyOptions) format() (*generated.BlobClientSetImmutabilityPolicyOptions, *ModifiedAccessConditions) { + if o == nil { + return &generated.BlobClientSetImmutabilityPolicyOptions{}, nil + } + ac := &exported.BlobAccessConditions{ + ModifiedAccessConditions: o.ModifiedAccessConditions, + } + _, modifiedAccessConditions := exported.FormatBlobAccessConditions(ac) + + options := &generated.BlobClientSetImmutabilityPolicyOptions{ + ImmutabilityPolicyMode: o.Mode, + } + + return options, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DeleteImmutabilityPolicyOptions contains the optional parameters for the Client.DeleteImmutabilityPolicy method. +type DeleteImmutabilityPolicyOptions struct { + // placeholder for future options +} + +func (o *DeleteImmutabilityPolicyOptions) format() *generated.BlobClientDeleteImmutabilityPolicyOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetLegalHoldOptions contains the optional parameters for the Client.SetLegalHold method. +type SetLegalHoldOptions struct { + // placeholder for future options +} + +func (o *SetLegalHoldOptions) format() *generated.BlobClientSetLegalHoldOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. +type GetSASURLOptions struct { + StartTime *time.Time +} + +func (o *GetSASURLOptions) format() time.Time { + if o == nil { + return time.Time{} + } + + var st time.Time + if o.StartTime != nil { + st = o.StartTime.UTC() + } else { + st = time.Time{} + } + return st +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CopyFromURLOptions contains the optional parameters for the Client.CopyFromURL method. +type CopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTags map[string]string + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. 
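A sketch of the immutability-policy and legal-hold calls above; these succeed only on accounts/containers with version-level immutability enabled, so the flow is illustrative.

package blobexamples

import (
    "context"
    "time"

    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func protectBlob(ctx context.Context, client *blob.Client) error {
    // Unlocked policy expiring in 24 hours; Mode defaults to Unlocked when omitted.
    mode := blob.ImmutabilityPolicySettingUnlocked
    _, err := client.SetImmutabilityPolicy(ctx, time.Now().Add(24*time.Hour), &blob.SetImmutabilityPolicyOptions{
        Mode: &mode,
    })
    if err != nil {
        return err
    }

    // A legal hold is independent of the time-based policy.
    _, err = client.SetLegalHold(ctx, true, nil)
    return err
}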
+ CopySourceAuthorization *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + SourceModifiedAccessConditions *SourceModifiedAccessConditions + + BlobAccessConditions *AccessConditions + + CPKScopeInfo *CPKScopeInfo +} + +func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions, *generated.CPKScopeInfo) { + if o == nil { + return nil, nil, nil, nil, nil + } + + options := &generated.BlobClientCopyFromURLOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.BlobTags), + CopySourceAuthorization: o.CopySourceAuthorization, + ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, + ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, + LegalHold: o.LegalHold, + Metadata: o.Metadata, + SourceContentMD5: o.SourceContentMD5, + Tier: o.Tier, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.BlobAccessConditions) + return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions, o.CPKScopeInfo +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetAccountInfoOptions provides set of options for Client.GetAccountInfo +type GetAccountInfoOptions struct { + // placeholder for future options +} + +func (o *GetAccountInfoOptions) format() *generated.BlobClientGetAccountInfoOptions { + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go new file mode 100644 index 0000000000..352d975264 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go @@ -0,0 +1,119 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "context" + "io" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// DownloadResponse contains the response from method BlobClient.Download. +type DownloadResponse = generated.BlobClientDownloadResponse + +// DownloadStreamResponse contains the response from the DownloadStream method. 
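A sketch of the synchronous CopyFromURL above (sources up to 256 MB), setting a destination tier and metadata; the source URL must already be readable by the service (public or pre-authorized) and is supplied by the caller.

package blobexamples

import (
    "context"

    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func syncCopy(ctx context.Context, dst *blob.Client, srcURL string) error {
    tier := blob.AccessTierCool
    owner := "ci"
    _, err := dst.CopyFromURL(ctx, srcURL, &blob.CopyFromURLOptions{
        Tier:     &tier,
        Metadata: map[string]*string{"owner": &owner},
    })
    return err
}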
+// To read from the stream, read from the Body field, or call the NewRetryReader method. +type DownloadStreamResponse struct { + DownloadResponse + ObjectReplicationRules []ObjectReplicationPolicy + + client *Client + getInfo httpGetterInfo + cpkInfo *CPKInfo + cpkScope *CPKScopeInfo +} + +// NewRetryReader constructs new RetryReader stream for reading data. If a connection fails while +// reading, it will make additional requests to reestablish a connection and continue reading. +// Pass nil for options to accept the default options. +// Callers of this method should not access the DownloadStreamResponse.Body field. +func (r *DownloadStreamResponse) NewRetryReader(ctx context.Context, options *RetryReaderOptions) *RetryReader { + if options == nil { + options = &RetryReaderOptions{} + } + + return newRetryReader(ctx, r.Body, r.getInfo, func(ctx context.Context, getInfo httpGetterInfo) (io.ReadCloser, error) { + accessConditions := &AccessConditions{ + ModifiedAccessConditions: &ModifiedAccessConditions{IfMatch: getInfo.ETag}, + } + options := DownloadStreamOptions{ + Range: getInfo.Range, + AccessConditions: accessConditions, + CPKInfo: r.cpkInfo, + CPKScopeInfo: r.cpkScope, + } + resp, err := r.client.DownloadStream(ctx, &options) + if err != nil { + return nil, err + } + return resp.Body, err + }, *options) +} + +// DeleteResponse contains the response from method BlobClient.Delete. +type DeleteResponse = generated.BlobClientDeleteResponse + +// UndeleteResponse contains the response from method BlobClient.Undelete. +type UndeleteResponse = generated.BlobClientUndeleteResponse + +// SetTierResponse contains the response from method BlobClient.SetTier. +type SetTierResponse = generated.BlobClientSetTierResponse + +// GetPropertiesResponse contains the response from method BlobClient.GetProperties. +type GetPropertiesResponse = generated.BlobClientGetPropertiesResponse + +// SetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders. +type SetHTTPHeadersResponse = generated.BlobClientSetHTTPHeadersResponse + +// SetMetadataResponse contains the response from method BlobClient.SetMetadata. +type SetMetadataResponse = generated.BlobClientSetMetadataResponse + +// CreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot. +type CreateSnapshotResponse = generated.BlobClientCreateSnapshotResponse + +// StartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL. +type StartCopyFromURLResponse = generated.BlobClientStartCopyFromURLResponse + +// AbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL. +type AbortCopyFromURLResponse = generated.BlobClientAbortCopyFromURLResponse + +// SetTagsResponse contains the response from method BlobClient.SetTags. +type SetTagsResponse = generated.BlobClientSetTagsResponse + +// GetTagsResponse contains the response from method BlobClient.GetTags. +type GetTagsResponse = generated.BlobClientGetTagsResponse + +// SetImmutabilityPolicyResponse contains the response from method BlobClient.SetImmutabilityPolicy. +type SetImmutabilityPolicyResponse = generated.BlobClientSetImmutabilityPolicyResponse + +// DeleteImmutabilityPolicyResponse contains the response from method BlobClient.DeleteImmutabilityPolicyResponse. +type DeleteImmutabilityPolicyResponse = generated.BlobClientDeleteImmutabilityPolicyResponse + +// SetLegalHoldResponse contains the response from method BlobClient.SetLegalHold. 
+type SetLegalHoldResponse = generated.BlobClientSetLegalHoldResponse + +// CopyFromURLResponse contains the response from method BlobClient.CopyFromURL. +type CopyFromURLResponse = generated.BlobClientCopyFromURLResponse + +// GetAccountInfoResponse contains the response from method BlobClient.GetAccountInfo. +type GetAccountInfoResponse = generated.BlobClientGetAccountInfoResponse + +// AcquireLeaseResponse contains the response from method BlobClient.AcquireLease. +type AcquireLeaseResponse = generated.BlobClientAcquireLeaseResponse + +// BreakLeaseResponse contains the response from method BlobClient.BreakLease. +type BreakLeaseResponse = generated.BlobClientBreakLeaseResponse + +// ChangeLeaseResponse contains the response from method BlobClient.ChangeLease. +type ChangeLeaseResponse = generated.BlobClientChangeLeaseResponse + +// ReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease. +type ReleaseLeaseResponse = generated.BlobClientReleaseLeaseResponse + +// RenewLeaseResponse contains the response from method BlobClient.RenewLease. +type RenewLeaseResponse = generated.BlobClientRenewLeaseResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go new file mode 100644 index 0000000000..a625c99532 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go @@ -0,0 +1,191 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "context" + "io" + "net" + "strings" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) + +// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation. +type httpGetter func(ctx context.Context, i httpGetterInfo) (io.ReadCloser, error) + +// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters +// that should be used to make an HTTP GET request. +type httpGetterInfo struct { + Range HTTPRange + + // ETag specifies the resource's etag that should be used when creating + // the HTTP GET request's If-Match header + ETag *azcore.ETag +} + +// RetryReaderOptions configures the retry reader's behavior. +// Zero-value fields will have their specified default values applied during use. +// This allows for modification of a subset of fields. +type RetryReaderOptions struct { + // MaxRetries specifies the maximum number of attempts a failed read will be retried + // before producing an error. + // The default value is three. + MaxRetries int32 + + // OnFailedRead, when non-nil, is called after any failure to read. Expected usage is diagnostic logging. + OnFailedRead func(failureCount int32, lastError error, rnge HTTPRange, willRetry bool) + + // EarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default, + // retryReader has the following special behaviour: closing the response body before it is all read is treated as a + // retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the = + // read is too slow, caller may want to force a retry in the hope that the retry will be quicker). 
If + // TreatEarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead + // treated as a fatal (non-retryable) error. + // Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens + // from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors + // which will be retried. + // The default value is false. + EarlyCloseAsError bool + + doInjectError bool + doInjectErrorRound int32 + injectedError error +} + +// RetryReader attempts to read from response, and if there is a retry-able network error +// returned during reading, it will retry according to retry reader option through executing +// user defined action with provided data to get a new response, and continue the overall reading process +// through reading from the new response. +// RetryReader implements the io.ReadCloser interface. +type RetryReader struct { + ctx context.Context + info httpGetterInfo + retryReaderOptions RetryReaderOptions + getter httpGetter + countWasBounded bool + + // we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response + responseMu *sync.Mutex + response io.ReadCloser +} + +// newRetryReader creates a retry reader. +func newRetryReader(ctx context.Context, initialResponse io.ReadCloser, info httpGetterInfo, getter httpGetter, o RetryReaderOptions) *RetryReader { + if o.MaxRetries < 1 { + o.MaxRetries = 3 + } + return &RetryReader{ + ctx: ctx, + getter: getter, + info: info, + countWasBounded: info.Range.Count != CountToEnd, + response: initialResponse, + responseMu: &sync.Mutex{}, + retryReaderOptions: o, + } +} + +// setResponse function +func (s *RetryReader) setResponse(r io.ReadCloser) { + s.responseMu.Lock() + defer s.responseMu.Unlock() + s.response = r +} + +// Read from retry reader +func (s *RetryReader) Read(p []byte) (n int, err error) { + for try := int32(0); ; try++ { + if s.countWasBounded && s.info.Range.Count == CountToEnd { + // User specified an original count and the remaining bytes are 0, return 0, EOF + return 0, io.EOF + } + + s.responseMu.Lock() + resp := s.response + s.responseMu.Unlock() + if resp == nil { // We don't have a response stream to read from, try to get one. + newResponse, err := s.getter(s.ctx, s.info) + if err != nil { + return 0, err + } + // Successful GET; this is the network stream we'll read from. + s.setResponse(newResponse) + resp = newResponse + } + n, err := resp.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running) + + // Injection mechanism for testing. + if s.retryReaderOptions.doInjectError && try == s.retryReaderOptions.doInjectErrorRound { + if s.retryReaderOptions.injectedError != nil { + err = s.retryReaderOptions.injectedError + } else { + err = &net.DNSError{IsTemporary: true} + } + } + + // We successfully read data or end EOF. + if err == nil || err == io.EOF { + s.info.Range.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future + if s.info.Range.Count != CountToEnd { + s.info.Range.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future + } + return n, err // Return the return to the caller + } + _ = s.Close() + + s.setResponse(nil) // Our stream is no longer good + + // Check the retry count and error code, and decide whether to retry. 
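A sketch of tuning the RetryReaderOptions defined above: raise MaxRetries past the default of three and log each failed read through OnFailedRead.

package blobexamples

import (
    "context"
    "io"
    "log"

    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func downloadWithRetryLogging(ctx context.Context, client *blob.Client, w io.Writer) error {
    resp, err := client.DownloadStream(ctx, nil)
    if err != nil {
        return err
    }
    rr := resp.NewRetryReader(ctx, &blob.RetryReaderOptions{
        MaxRetries: 5,
        OnFailedRead: func(failureCount int32, lastErr error, rnge blob.HTTPRange, willRetry bool) {
            log.Printf("read failure %d at offset %d: %v (will retry: %t)", failureCount, rnge.Offset, lastErr, willRetry)
        },
    })
    defer rr.Close()

    _, err = io.Copy(w, rr)
    return err
}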
+ retriesExhausted := try >= s.retryReaderOptions.MaxRetries + _, isNetError := err.(net.Error) + isUnexpectedEOF := err == io.ErrUnexpectedEOF + willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted + + // Notify, for logging purposes, of any failures + if s.retryReaderOptions.OnFailedRead != nil { + failureCount := try + 1 // because try is zero-based + s.retryReaderOptions.OnFailedRead(failureCount, err, s.info.Range, willRetry) + } + + if willRetry { + continue + // Loop around and try to get and read from new stream. + } + return n, err // Not retryable, or retries exhausted, so just return + } +} + +// By default, we allow early Closing, from another concurrent goroutine, to be used to force a retry +// Is this safe, to close early from another goroutine? Early close ultimately ends up calling +// net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors" +// which is exactly the behaviour we want. +// NOTE: that if caller has forced an early Close from a separate goroutine (separate from the Read) +// then there are two different types of error that may happen - either the one we check for here, +// or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine +// to check for one, since the other is a net.Error, which our main Read retry loop is already handing. +func (s *RetryReader) wasRetryableEarlyClose(err error) bool { + if s.retryReaderOptions.EarlyCloseAsError { + return false // user wants all early closes to be errors, and so not retryable + } + // unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text + return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage) +} + +// ReadOnClosedBodyMessage of retry reader +const ReadOnClosedBodyMessage = "read on closed response body" + +// Close retry reader +func (s *RetryReader) Close() error { + s.responseMu.Lock() + defer s.responseMu.Unlock() + if s.response != nil { + return s.response.Close() + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go new file mode 100644 index 0000000000..c2d517d8ad --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go @@ -0,0 +1,79 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// ObjectReplicationRules struct +type ObjectReplicationRules struct { + RuleID string + Status string +} + +// ObjectReplicationPolicy are deserialized attributes. +type ObjectReplicationPolicy struct { + PolicyID *string + Rules *[]ObjectReplicationRules +} + +// deserializeORSPolicies is utility function to deserialize ORS Policies. +func deserializeORSPolicies(policies map[string]*string) (objectReplicationPolicies []ObjectReplicationPolicy) { + if policies == nil { + return nil + } + // For source blobs (blobs that have policy ids and rule ids applied to them), + // the header will be formatted as "x-ms-or-_: {Complete, Failed}". + // The value of this header is the status of the replication. 
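+	// For example (illustrative values only): a header map such as
+	//
+	//	"x-ms-or-policy-id":       "5fa3...",
+	//	"x-ms-or-5fa3..._a1b2...": "Complete",
+	//
+	// is parsed below into a single ObjectReplicationPolicy whose PolicyID is "5fa3..."
+	// with one rule {RuleID: "a1b2...", Status: "Complete"}; the IDs are made up here.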
+ orPolicyStatusHeader := make(map[string]*string) + for key, value := range policies { + if strings.Contains(key, "or-") && key != "x-ms-or-policy-id" { + orPolicyStatusHeader[key] = value + } + } + + parsedResult := make(map[string][]ObjectReplicationRules) + for key, value := range orPolicyStatusHeader { + policyAndRuleIDs := strings.Split(strings.Split(key, "or-")[1], "_") + policyId, ruleId := policyAndRuleIDs[0], policyAndRuleIDs[1] + + parsedResult[policyId] = append(parsedResult[policyId], ObjectReplicationRules{RuleID: ruleId, Status: *value}) + } + + for policyId, rules := range parsedResult { + objectReplicationPolicies = append(objectReplicationPolicies, ObjectReplicationPolicy{ + PolicyID: &policyId, + Rules: &rules, + }) + } + return +} + +// ParseHTTPHeaders parses GetPropertiesResponse and returns HTTPHeaders. +func ParseHTTPHeaders(resp GetPropertiesResponse) HTTPHeaders { + return HTTPHeaders{ + BlobContentType: resp.ContentType, + BlobContentEncoding: resp.ContentEncoding, + BlobContentLanguage: resp.ContentLanguage, + BlobContentDisposition: resp.ContentDisposition, + BlobCacheControl: resp.CacheControl, + BlobContentMD5: resp.ContentMD5, + } +} + +// URLParts object represents the components that make up an Azure Storage Container/Blob URL. +// NOTE: Changing any SAS-related field requires computing a new SAS signature. +type URLParts = sas.URLParts + +// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. Any other +// query parameters remain in the UnparsedParams field. This method overwrites all fields in the URLParts object. +func ParseURL(u string) (URLParts, error) { + return sas.ParseURL(u) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go new file mode 100644 index 0000000000..07fad60611 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go @@ -0,0 +1,159 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package bloberror + +import ( + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// HasCode returns true if the provided error is an *azcore.ResponseError +// with its ErrorCode field equal to one of the specified Codes. 
+func HasCode(err error, codes ...Code) bool { + var respErr *azcore.ResponseError + if !errors.As(err, &respErr) { + return false + } + + for _, code := range codes { + if respErr.ErrorCode == string(code) { + return true + } + } + + return false +} + +// Code - Error codes returned by the service +type Code = generated.StorageErrorCode + +const ( + AccountAlreadyExists Code = "AccountAlreadyExists" + AccountBeingCreated Code = "AccountBeingCreated" + AccountIsDisabled Code = "AccountIsDisabled" + AppendPositionConditionNotMet Code = "AppendPositionConditionNotMet" + AuthenticationFailed Code = "AuthenticationFailed" + AuthorizationFailure Code = "AuthorizationFailure" + AuthorizationPermissionMismatch Code = "AuthorizationPermissionMismatch" + AuthorizationProtocolMismatch Code = "AuthorizationProtocolMismatch" + AuthorizationResourceTypeMismatch Code = "AuthorizationResourceTypeMismatch" + AuthorizationServiceMismatch Code = "AuthorizationServiceMismatch" + AuthorizationSourceIPMismatch Code = "AuthorizationSourceIPMismatch" + BlobAlreadyExists Code = "BlobAlreadyExists" + BlobArchived Code = "BlobArchived" + BlobBeingRehydrated Code = "BlobBeingRehydrated" + BlobImmutableDueToPolicy Code = "BlobImmutableDueToPolicy" + BlobNotArchived Code = "BlobNotArchived" + BlobNotFound Code = "BlobNotFound" + BlobOverwritten Code = "BlobOverwritten" + BlobTierInadequateForContentLength Code = "BlobTierInadequateForContentLength" + BlobUsesCustomerSpecifiedEncryption Code = "BlobUsesCustomerSpecifiedEncryption" + BlockCountExceedsLimit Code = "BlockCountExceedsLimit" + BlockListTooLong Code = "BlockListTooLong" + CannotChangeToLowerTier Code = "CannotChangeToLowerTier" + CannotVerifyCopySource Code = "CannotVerifyCopySource" + ConditionHeadersNotSupported Code = "ConditionHeadersNotSupported" + ConditionNotMet Code = "ConditionNotMet" + ContainerAlreadyExists Code = "ContainerAlreadyExists" + ContainerBeingDeleted Code = "ContainerBeingDeleted" + ContainerDisabled Code = "ContainerDisabled" + ContainerNotFound Code = "ContainerNotFound" + ContentLengthLargerThanTierLimit Code = "ContentLengthLargerThanTierLimit" + CopyAcrossAccountsNotSupported Code = "CopyAcrossAccountsNotSupported" + CopyIDMismatch Code = "CopyIdMismatch" + EmptyMetadataKey Code = "EmptyMetadataKey" + FeatureVersionMismatch Code = "FeatureVersionMismatch" + ImmutabilityPolicyDeleteOnLockedPolicy Code = "ImmutabilityPolicyDeleteOnLockedPolicy" + IncrementalCopyBlobMismatch Code = "IncrementalCopyBlobMismatch" + IncrementalCopyOfEralierVersionSnapshotNotAllowed Code = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" + IncrementalCopySourceMustBeSnapshot Code = "IncrementalCopySourceMustBeSnapshot" + InfiniteLeaseDurationRequired Code = "InfiniteLeaseDurationRequired" + InsufficientAccountPermissions Code = "InsufficientAccountPermissions" + InternalError Code = "InternalError" + InvalidAuthenticationInfo Code = "InvalidAuthenticationInfo" + InvalidBlobOrBlock Code = "InvalidBlobOrBlock" + InvalidBlobTier Code = "InvalidBlobTier" + InvalidBlobType Code = "InvalidBlobType" + InvalidBlockID Code = "InvalidBlockId" + InvalidBlockList Code = "InvalidBlockList" + InvalidHTTPVerb Code = "InvalidHttpVerb" + InvalidHeaderValue Code = "InvalidHeaderValue" + InvalidInput Code = "InvalidInput" + InvalidMD5 Code = "InvalidMd5" + InvalidMetadata Code = "InvalidMetadata" + InvalidOperation Code = "InvalidOperation" + InvalidPageRange Code = "InvalidPageRange" + InvalidQueryParameterValue Code = "InvalidQueryParameterValue" + InvalidRange Code = 
"InvalidRange" + InvalidResourceName Code = "InvalidResourceName" + InvalidSourceBlobType Code = "InvalidSourceBlobType" + InvalidSourceBlobURL Code = "InvalidSourceBlobUrl" + InvalidURI Code = "InvalidUri" + InvalidVersionForPageBlobOperation Code = "InvalidVersionForPageBlobOperation" + InvalidXMLDocument Code = "InvalidXmlDocument" + InvalidXMLNodeValue Code = "InvalidXmlNodeValue" + LeaseAlreadyBroken Code = "LeaseAlreadyBroken" + LeaseAlreadyPresent Code = "LeaseAlreadyPresent" + LeaseIDMismatchWithBlobOperation Code = "LeaseIdMismatchWithBlobOperation" + LeaseIDMismatchWithContainerOperation Code = "LeaseIdMismatchWithContainerOperation" + LeaseIDMismatchWithLeaseOperation Code = "LeaseIdMismatchWithLeaseOperation" + LeaseIDMissing Code = "LeaseIdMissing" + LeaseIsBreakingAndCannotBeAcquired Code = "LeaseIsBreakingAndCannotBeAcquired" + LeaseIsBreakingAndCannotBeChanged Code = "LeaseIsBreakingAndCannotBeChanged" + LeaseIsBrokenAndCannotBeRenewed Code = "LeaseIsBrokenAndCannotBeRenewed" + LeaseLost Code = "LeaseLost" + LeaseNotPresentWithBlobOperation Code = "LeaseNotPresentWithBlobOperation" + LeaseNotPresentWithContainerOperation Code = "LeaseNotPresentWithContainerOperation" + LeaseNotPresentWithLeaseOperation Code = "LeaseNotPresentWithLeaseOperation" + MD5Mismatch Code = "Md5Mismatch" + CRC64Mismatch Code = "Crc64Mismatch" + MaxBlobSizeConditionNotMet Code = "MaxBlobSizeConditionNotMet" + MetadataTooLarge Code = "MetadataTooLarge" + MissingContentLengthHeader Code = "MissingContentLengthHeader" + MissingRequiredHeader Code = "MissingRequiredHeader" + MissingRequiredQueryParameter Code = "MissingRequiredQueryParameter" + MissingRequiredXMLNode Code = "MissingRequiredXmlNode" + MultipleConditionHeadersNotSupported Code = "MultipleConditionHeadersNotSupported" + NoAuthenticationInformation Code = "NoAuthenticationInformation" + NoPendingCopyOperation Code = "NoPendingCopyOperation" + OperationNotAllowedOnIncrementalCopyBlob Code = "OperationNotAllowedOnIncrementalCopyBlob" + OperationNotAllowedOnRootBlob Code = "OperationNotAllowedOnRootBlob" + OperationTimedOut Code = "OperationTimedOut" + OutOfRangeInput Code = "OutOfRangeInput" + OutOfRangeQueryParameterValue Code = "OutOfRangeQueryParameterValue" + PendingCopyOperation Code = "PendingCopyOperation" + PreviousSnapshotCannotBeNewer Code = "PreviousSnapshotCannotBeNewer" + PreviousSnapshotNotFound Code = "PreviousSnapshotNotFound" + PreviousSnapshotOperationNotSupported Code = "PreviousSnapshotOperationNotSupported" + RequestBodyTooLarge Code = "RequestBodyTooLarge" + RequestURLFailedToParse Code = "RequestUrlFailedToParse" + ResourceAlreadyExists Code = "ResourceAlreadyExists" + ResourceNotFound Code = "ResourceNotFound" + ResourceTypeMismatch Code = "ResourceTypeMismatch" + SequenceNumberConditionNotMet Code = "SequenceNumberConditionNotMet" + SequenceNumberIncrementTooLarge Code = "SequenceNumberIncrementTooLarge" + ServerBusy Code = "ServerBusy" + SnapshotCountExceeded Code = "SnapshotCountExceeded" + SnapshotOperationRateExceeded Code = "SnapshotOperationRateExceeded" + SnapshotsPresent Code = "SnapshotsPresent" + SourceConditionNotMet Code = "SourceConditionNotMet" + SystemInUse Code = "SystemInUse" + TargetConditionNotMet Code = "TargetConditionNotMet" + UnauthorizedBlobOverwrite Code = "UnauthorizedBlobOverwrite" + UnsupportedHTTPVerb Code = "UnsupportedHttpVerb" + UnsupportedHeader Code = "UnsupportedHeader" + UnsupportedQueryParameter Code = "UnsupportedQueryParameter" + UnsupportedXMLNode Code = "UnsupportedXmlNode" +) + 
+var ( + // MissingSharedKeyCredential - Error is returned when SAS URL is being created without SharedKeyCredential. + MissingSharedKeyCredential = errors.New("SAS can only be signed with a SharedKeyCredential") + UnsupportedChecksum = errors.New("for multi-part uploads, user generated checksums cannot be validated") +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go new file mode 100644 index 0000000000..24df42c75e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go @@ -0,0 +1,249 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/binary" + "errors" + "io" + "sync" + "sync/atomic" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// blockWriter provides methods to upload blocks that represent a file to a server and commit them. +// This allows us to provide a local implementation that fakes the server for hermetic testing. +type blockWriter interface { + StageBlock(context.Context, string, io.ReadSeekCloser, *StageBlockOptions) (StageBlockResponse, error) + Upload(context.Context, io.ReadSeekCloser, *UploadOptions) (UploadResponse, error) + CommitBlockList(context.Context, []string, *CommitBlockListOptions) (CommitBlockListResponse, error) +} + +// copyFromReader copies a source io.Reader to blob storage using concurrent uploads. +func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWriter, options UploadStreamOptions, getBufferManager func(maxBuffers int, bufferSize int64) shared.BufferManager[T]) (CommitBlockListResponse, error) { + options.setDefaults() + + wg := sync.WaitGroup{} // Used to know when all outgoing blocks have finished processing + errCh := make(chan error, 1) // contains the first error encountered during processing + + buffers := getBufferManager(options.Concurrency, options.BlockSize) + defer buffers.Free() + + // this controls the lifetime of the uploading goroutines. + // if an error is encountered, cancel() is called which will terminate all uploads. + // NOTE: the ordering is important here. cancel MUST execute before + // cleaning up the buffers so that any uploading goroutines exit first, + // releasing their buffers back to the pool for cleanup. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // all blocks have IDs that start with a random UUID + blockIDPrefix, err := uuid.New() + if err != nil { + return CommitBlockListResponse{}, err + } + tracker := blockTracker{ + blockIDPrefix: blockIDPrefix, + options: options, + } + + // This goroutine grabs a buffer, reads from the stream into the buffer, + // then creates a goroutine to upload/stage the block. 
+ for blockNum := uint32(0); true; blockNum++ { + var buffer T + select { + case buffer = <-buffers.Acquire(): + // got a buffer + default: + // no buffer available; allocate a new buffer if possible + if _, err := buffers.Grow(); err != nil { + return CommitBlockListResponse{}, err + } + + // either grab the newly allocated buffer or wait for one to become available + buffer = <-buffers.Acquire() + } + + var n int + n, err = shared.ReadAtLeast(src, buffer, len(buffer)) + + if n > 0 { + // some data was read, upload it + wg.Add(1) // We're posting a buffer to be sent + + // NOTE: we must pass blockNum as an arg to our goroutine else + // it's captured by reference and can change underneath us! + go func(blockNum uint32) { + // Upload the outgoing block, matching the number of bytes read + err := tracker.uploadBlock(ctx, dst, blockNum, buffer[:n]) + if err != nil { + select { + case errCh <- err: + // error was set + default: + // some other error is already set + } + cancel() + } + buffers.Release(buffer) // The goroutine reading from the stream can reuse this buffer now + + // signal that the block has been staged. + // we MUST do this after attempting to write to errCh + // to avoid it racing with the reading goroutine. + wg.Done() + }(blockNum) + } else { + // nothing was read so the buffer is empty, send it back for reuse/clean-up. + buffers.Release(buffer) + } + + if err != nil { // The reader is done, no more outgoing buffers + if errors.Is(err, io.EOF) { + // these are expected errors, we don't surface those + err = nil + } else { + // some other error happened, terminate any outstanding uploads + cancel() + } + break + } + } + + wg.Wait() // Wait for all outgoing blocks to complete + + if err != nil { + // there was an error reading from src, favor this error over any error during staging + return CommitBlockListResponse{}, err + } + + select { + case err = <-errCh: + // there was an error during staging + return CommitBlockListResponse{}, err + default: + // no error was encountered + } + + // If no error, after all blocks uploaded, commit them to the blob & return the result + return tracker.commitBlocks(ctx, dst) +} + +// used to manage the uploading and committing of blocks +type blockTracker struct { + blockIDPrefix uuid.UUID // UUID used with all blockIDs + maxBlockNum uint32 // defaults to 0 + firstBlock []byte // Used only if maxBlockNum is 0 + options UploadStreamOptions +} + +func (bt *blockTracker) uploadBlock(ctx context.Context, to blockWriter, num uint32, buffer []byte) error { + if num == 0 { + bt.firstBlock = buffer + + // If whole payload fits in 1 block, don't stage it; End will upload it with 1 I/O operation + // If the payload is exactly the same size as the buffer, there may be more content coming in. + if len(buffer) < int(bt.options.BlockSize) { + return nil + } + } else { + // Else, upload a staged block... 
+ atomicMorphUint32(&bt.maxBlockNum, func(startVal uint32) (val uint32, morphResult uint32) { + // Atomically remember (in t.numBlocks) the maximum block num we've ever seen + if startVal < num { + return num, 0 + } + return startVal, 0 + }) + } + + blockID := newUUIDBlockID(bt.blockIDPrefix).WithBlockNumber(num).ToBase64() + _, err := to.StageBlock(ctx, blockID, streaming.NopCloser(bytes.NewReader(buffer)), bt.options.getStageBlockOptions()) + return err +} + +func (bt *blockTracker) commitBlocks(ctx context.Context, to blockWriter) (CommitBlockListResponse, error) { + // If the first block had the exact same size as the buffer + // we would have staged it as a block thinking that there might be more data coming + if bt.maxBlockNum == 0 && len(bt.firstBlock) < int(bt.options.BlockSize) { + // If whole payload fits in 1 block (block #0), upload it with 1 I/O operation + up, err := to.Upload(ctx, streaming.NopCloser(bytes.NewReader(bt.firstBlock)), bt.options.getUploadOptions()) + if err != nil { + return CommitBlockListResponse{}, err + } + + // convert UploadResponse to CommitBlockListResponse + return CommitBlockListResponse{ + ClientRequestID: up.ClientRequestID, + ContentMD5: up.ContentMD5, + Date: up.Date, + ETag: up.ETag, + EncryptionKeySHA256: up.EncryptionKeySHA256, + EncryptionScope: up.EncryptionScope, + IsServerEncrypted: up.IsServerEncrypted, + LastModified: up.LastModified, + RequestID: up.RequestID, + Version: up.Version, + VersionID: up.VersionID, + //ContentCRC64: up.ContentCRC64, doesn't exist on UploadResponse + }, nil + } + + // Multiple blocks staged, commit them all now + blockID := newUUIDBlockID(bt.blockIDPrefix) + blockIDs := make([]string, bt.maxBlockNum+1) + for bn := uint32(0); bn < bt.maxBlockNum+1; bn++ { + blockIDs[bn] = blockID.WithBlockNumber(bn).ToBase64() + } + + return to.CommitBlockList(ctx, blockIDs, bt.options.getCommitBlockListOptions()) +} + +// AtomicMorpherUint32 identifies a method passed to and invoked by the AtomicMorph function. +// The AtomicMorpher callback is passed a startValue and based on this value it returns +// what the new value should be and the result that AtomicMorph should return to its caller. +type atomicMorpherUint32 func(startVal uint32) (val uint32, morphResult uint32) + +// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function. 
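+// For example, the block tracker above uses it in uploadBlock to remember the highest
+// block number staged so far (num is the current block number; the morph result is unused):
+//
+//	atomicMorphUint32(&bt.maxBlockNum, func(startVal uint32) (uint32, uint32) {
+//		if startVal < num {
+//			return num, 0
+//		}
+//		return startVal, 0
+//	})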
+func atomicMorphUint32(target *uint32, morpher atomicMorpherUint32) uint32 { + for { + currentVal := atomic.LoadUint32(target) + desiredVal, morphResult := morpher(currentVal) + if atomic.CompareAndSwapUint32(target, currentVal, desiredVal) { + return morphResult + } + } +} + +type blockID [64]byte + +func (blockID blockID) ToBase64() string { + return base64.StdEncoding.EncodeToString(blockID[:]) +} + +type uuidBlockID blockID + +func newUUIDBlockID(u uuid.UUID) uuidBlockID { + ubi := uuidBlockID{} // Create a new uuidBlockID + copy(ubi[:len(u)], u[:]) // Copy the specified UUID into it + // Block number defaults to 0 + return ubi +} + +func (ubi uuidBlockID) WithBlockNumber(blockNumber uint32) uuidBlockID { + binary.BigEndian.PutUint32(ubi[len(uuid.UUID{}):], blockNumber) // Put block number after UUID + return ubi // Return the passed-in copy +} + +func (ubi uuidBlockID) ToBase64() string { + return blockID(ubi).ToBase64() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go new file mode 100644 index 0000000000..7a3ab3fe8d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go @@ -0,0 +1,597 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "io" + "math" + "os" + "reflect" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions base.ClientOptions + +// Client defines a set of operations applicable to block blobs. +type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient] + +// NewClient creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. 
https://.blob.core.windows.net/container/blob.txt +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + audience := base.GetAudience((*base.ClientOptions)(options)) + conOptions := shared.GetClientOptions(options) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} + + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewBlockBlobClient(blobURL, azClient, nil)), nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a blob or with a shared access signature (SAS) token. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt? +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewBlockBlobClient(blobURL, azClient, nil)), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt +// - cred - a SharedKeyCredential created with the matching blob's storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} + + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewBlockBlobClient(blobURL, azClient, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. 
+// - connectionString - a connection string for the desired storage account +// - containerName - the name of the container within the storage account +// - blobName - the name of the blob within the container +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName) + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (bb *Client) sharedKey() *blob.SharedKeyCredential { + return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb)) +} + +func (bb *Client) generated() *generated.BlockBlobClient { + _, blockBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb)) + return blockBlob +} + +func (bb *Client) innerBlobGenerated() *generated.BlobClient { + b := bb.BlobClient() + return base.InnerClient((*base.Client[generated.BlobClient])(b)) +} + +// URL returns the URL endpoint used by the Client object. +func (bb *Client) URL() string { + return bb.generated().Endpoint() +} + +// BlobClient returns the embedded blob client for this BlockBlob client. +func (bb *Client) BlobClient() *blob.Client { + blobClient, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb)) + return (*blob.Client)(blobClient) +} + +// WithSnapshot creates a new Client object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (bb *Client) WithSnapshot(snapshot string) (*Client, error) { + p, err := blob.ParseURL(bb.URL()) + if err != nil { + return nil, err + } + p.Snapshot = snapshot + + return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Internal(), bb.sharedKey())), nil +} + +// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. +// Pass "" to remove the versionID returning a URL to the base blob. +func (bb *Client) WithVersionID(versionID string) (*Client, error) { + p, err := blob.ParseURL(bb.URL()) + if err != nil { + return nil, err + } + p.VersionID = versionID + + return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Internal(), bb.sharedKey())), nil +} + +// Upload creates a new block blob or overwrites an existing block blob. +// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not +// supported with Upload; the content of the existing blob is overwritten with the new content. To +// perform a partial update of a block blob, use StageBlock and CommitBlockList. +// This method panics if the stream is not at position 0. +// Note that the http client closes the body stream after the request is sent to the service. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. 
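+// Illustrative sketch of a whole-blob upload (client, ctx and the file path are assumptions;
+// *os.File satisfies io.ReadSeekCloser and a freshly opened file is at position 0):
+//
+//	f, err := os.Open("/tmp/data.bin")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	if _, err := client.Upload(ctx, f, nil); err != nil {
+//		return err
+//	}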
+func (bb *Client) Upload(ctx context.Context, body io.ReadSeekCloser, options *UploadOptions) (UploadResponse, error) { + count, err := shared.ValidateSeekableStreamAt0AndGetCount(body) + if err != nil { + return UploadResponse{}, err + } + + opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions := options.format() + + if options != nil && options.TransactionalValidation != nil { + body, err = options.TransactionalValidation.Apply(body, opts) + if err != nil { + return UploadResponse{}, err + } + } + + resp, err := bb.generated().Upload(ctx, count, body, opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions) + return resp, err +} + +// UploadBlobFromURL - The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from +// a given URL. Partial updates are not supported with Put Blob from URL; the content of an existing blob is overwritten +// with the content of the new blob. To perform partial updates to a block blob’s contents using a source URL, use the Put +// Block from URL API in conjunction with Put Block List. +// For more information, see https://learn.microsoft.com/rest/api/storageservices/put-blob-from-url +func (bb *Client) UploadBlobFromURL(ctx context.Context, copySource string, options *UploadBlobFromURLOptions) (UploadBlobFromURLResponse, error) { + opts, httpHeaders, leaseAccessConditions, cpkInfo, cpkSourceInfo, modifiedAccessConditions, sourceModifiedConditions := options.format() + + resp, err := bb.generated().PutBlobFromURL(ctx, int64(0), copySource, opts, httpHeaders, leaseAccessConditions, cpkInfo, cpkSourceInfo, modifiedAccessConditions, sourceModifiedConditions) + + return resp, err +} + +// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList. +// Note that the http client closes the body stream after the request is sent to the service. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block. +func (bb *Client) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeekCloser, options *StageBlockOptions) (StageBlockResponse, error) { + count, err := shared.ValidateSeekableStreamAt0AndGetCount(body) + if err != nil { + return StageBlockResponse{}, err + } + + opts, leaseAccessConditions, cpkInfo, cpkScopeInfo := options.format() + + if options != nil && options.TransactionalValidation != nil { + body, err = options.TransactionalValidation.Apply(body, opts) + if err != nil { + return StageBlockResponse{}, nil + } + } + + resp, err := bb.generated().StageBlock(ctx, base64BlockID, count, body, opts, leaseAccessConditions, cpkInfo, cpkScopeInfo) + return resp, err +} + +// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList. +// If count is CountToEnd (0), then data is read from specified offset to the end. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url. 
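+// Illustrative sketch of staging locally supplied blocks and committing them (client, ctx,
+// part0 and part1 are assumptions; block IDs must be base64-encoded and, within one blob,
+// all of the same length):
+//
+//	id0 := base64.StdEncoding.EncodeToString([]byte("block-00000"))
+//	id1 := base64.StdEncoding.EncodeToString([]byte("block-00001"))
+//	if _, err := client.StageBlock(ctx, id0, streaming.NopCloser(bytes.NewReader(part0)), nil); err != nil {
+//		return err
+//	}
+//	if _, err := client.StageBlock(ctx, id1, streaming.NopCloser(bytes.NewReader(part1)), nil); err != nil {
+//		return err
+//	}
+//	if _, err := client.CommitBlockList(ctx, []string{id0, id1}, nil); err != nil {
+//		return err
+//	}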
+func (bb *Client) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL string, options *StageBlockFromURLOptions) (StageBlockFromURLResponse, error) { + + stageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions := options.format() + + resp, err := bb.generated().StageBlockFromURL(ctx, base64BlockID, 0, sourceURL, stageBlockFromURLOptions, + cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) + + return resp, err +} + +// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob. +// In order to be written as part of a blob, a block must have been successfully written +// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob +// by uploading only those blocks that have changed, then committing the new and existing +// blocks together. Any blocks not specified in the block list and permanently deleted. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list. +func (bb *Client) CommitBlockList(ctx context.Context, base64BlockIDs []string, options *CommitBlockListOptions) (CommitBlockListResponse, error) { + // this is a code smell in the generated code + blockIds := make([]*string, len(base64BlockIDs)) + for k, v := range base64BlockIDs { + blockIds[k] = to.Ptr(v) + } + + blockLookupList := generated.BlockLookupList{Latest: blockIds} + + var commitOptions *generated.BlockBlobClientCommitBlockListOptions + var headers *generated.BlobHTTPHeaders + var leaseAccess *blob.LeaseAccessConditions + var cpkInfo *generated.CPKInfo + var cpkScope *generated.CPKScopeInfo + var modifiedAccess *generated.ModifiedAccessConditions + + if options != nil { + commitOptions = &generated.BlockBlobClientCommitBlockListOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(options.Tags), + Metadata: options.Metadata, + RequestID: options.RequestID, + Tier: options.Tier, + Timeout: options.Timeout, + TransactionalContentCRC64: options.TransactionalContentCRC64, + TransactionalContentMD5: options.TransactionalContentMD5, + LegalHold: options.LegalHold, + ImmutabilityPolicyMode: options.ImmutabilityPolicyMode, + ImmutabilityPolicyExpiry: options.ImmutabilityPolicyExpiryTime, + } + + // If user attempts to pass in their own checksum, errors out. + if options.TransactionalContentMD5 != nil || options.TransactionalContentCRC64 != nil { + return CommitBlockListResponse{}, bloberror.UnsupportedChecksum + } + + headers = options.HTTPHeaders + leaseAccess, modifiedAccess = exported.FormatBlobAccessConditions(options.AccessConditions) + cpkInfo = options.CPKInfo + cpkScope = options.CPKScopeInfo + } + + resp, err := bb.generated().CommitBlockList(ctx, blockLookupList, commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess) + return resp, err +} + +// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list. +func (bb *Client) GetBlockList(ctx context.Context, listType BlockListType, options *GetBlockListOptions) (GetBlockListResponse, error) { + o, lac, mac := options.format() + + resp, err := bb.generated().GetBlockList(ctx, listType, o, lac, mac) + + return resp, err +} + +// Redeclared APIs ----- Copy over to Append blob and Page blob as well. + +// Delete marks the specified blob or snapshot for deletion. 
The blob is later deleted during garbage collection. +// Note that deleting a blob also deletes all its snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. +func (bb *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) { + return bb.BlobClient().Delete(ctx, o) +} + +// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. +func (bb *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) { + return bb.BlobClient().Undelete(ctx, o) +} + +// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (bb *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *blob.SetImmutabilityPolicyOptions) (blob.SetImmutabilityPolicyResponse, error) { + return bb.BlobClient().SetImmutabilityPolicy(ctx, expiryTime, options) +} + +// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (bb *Client) DeleteImmutabilityPolicy(ctx context.Context, options *blob.DeleteImmutabilityPolicyOptions) (blob.DeleteImmutabilityPolicyResponse, error) { + return bb.BlobClient().DeleteImmutabilityPolicy(ctx, options) +} + +// SetLegalHold operation enables users to set legal hold on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (bb *Client) SetLegalHold(ctx context.Context, legalHold bool, options *blob.SetLegalHoldOptions) (blob.SetLegalHoldResponse, error) { + return bb.BlobClient().SetLegalHold(ctx, legalHold, options) +} + +// SetTier operation sets the tier on a blob. The operation is allowed on a page +// blob in a premium storage account and on a block blob in a blob storage account (locally +// redundant storage only). A premium page blob's tier determines the allowed size, IOPs, and +// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation +// does not update the blob's ETag. +// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +func (bb *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) { + return bb.BlobClient().SetTier(ctx, tier, o) +} + +// SetExpiry operation sets an expiry time on an existing blob. This operation is only allowed on Hierarchical Namespace enabled accounts. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-blob-expiry +func (bb *Client) SetExpiry(ctx context.Context, expiryType ExpiryType, o *SetExpiryOptions) (SetExpiryResponse, error) { + if expiryType == nil { + expiryType = ExpiryTypeNever{} + } + et, opts := expiryType.Format(o) + resp, err := bb.innerBlobGenerated().SetExpiry(ctx, et, opts) + return resp, err +} + +// GetProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. 
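+// Illustrative sketch combining GetProperties with blob.ParseHTTPHeaders to re-apply the
+// blob's existing headers with one field changed (client, ctx and the content type value
+// are assumptions):
+//
+//	props, err := client.GetProperties(ctx, nil)
+//	if err != nil {
+//		return err
+//	}
+//	hdrs := blob.ParseHTTPHeaders(props)
+//	hdrs.BlobContentType = to.Ptr("application/json")
+//	if _, err := client.SetHTTPHeaders(ctx, hdrs, nil); err != nil {
+//		return err
+//	}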
+func (bb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) { + return bb.BlobClient().GetProperties(ctx, o) +} + +// GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. +func (bb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOptions) (blob.GetAccountInfoResponse, error) { + return bb.BlobClient().GetAccountInfo(ctx, o) +} + +// SetHTTPHeaders changes a blob's HTTP headers. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (bb *Client) SetHTTPHeaders(ctx context.Context, httpHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return bb.BlobClient().SetHTTPHeaders(ctx, httpHeaders, o) +} + +// SetMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. +func (bb *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { + return bb.BlobClient().SetMetadata(ctx, metadata, o) +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. +func (bb *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) { + return bb.BlobClient().CreateSnapshot(ctx, o) +} + +// StartCopyFromURL copies the data at the source URL to a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. +func (bb *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) { + return bb.BlobClient().StartCopyFromURL(ctx, copySource, o) +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. +func (bb *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) { + return bb.BlobClient().AbortCopyFromURL(ctx, copyID, o) +} + +// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. +// Each call to this operation replaces all existing tags attached to the blob. +// To remove all tags from the blob, call this operation with no tags set. +// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags +func (bb *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) { + return bb.BlobClient().SetTags(ctx, tags, o) +} + +// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. +// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags +func (bb *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) { + return bb.BlobClient().GetTags(ctx, o) +} + +// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. 
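+// Illustrative sketch (client, ctx and srcURL are assumptions; the source must be readable
+// by the service, e.g. a public blob or a URL carrying a SAS token):
+//
+//	if _, err := client.CopyFromURL(ctx, srcURL, nil); err != nil {
+//		return err
+//	}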
+func (bb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) { + return bb.BlobClient().CopyFromURL(ctx, copySource, o) +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at block blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (bb *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) { + return bb.BlobClient().GetSASURL(permissions, expiry, o) +} + +// Concurrent Upload Functions ----------------------------------------------------------------------------------------- + +// uploadFromReader uploads a buffer in blocks to a block blob. +func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actualSize int64, o *uploadFromReaderOptions) (uploadFromReaderResponse, error) { + if o.BlockSize == 0 { + // If bufferSize > (MaxStageBlockBytes * MaxBlocks), then error + if actualSize > MaxStageBlockBytes*MaxBlocks { + return uploadFromReaderResponse{}, errors.New("buffer is too large to upload to a block blob") + } + // If bufferSize <= MaxUploadBlobBytes, then Upload should be used with just 1 I/O request + if actualSize <= MaxUploadBlobBytes { + o.BlockSize = MaxUploadBlobBytes // Default if unspecified + } else { + o.BlockSize = int64(math.Ceil(float64(actualSize) / MaxBlocks)) // ceil(buffer / max blocks) = block size to use all 50,000 blocks + if o.BlockSize < blob.DefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB + o.BlockSize = blob.DefaultDownloadBlockSize + } + // StageBlock will be called with blockSize blocks and a Concurrency of (BufferSize / BlockSize). + } + } + + if actualSize <= MaxUploadBlobBytes { + // If the size can fit in 1 Upload call, do it this way + var body io.ReadSeeker = io.NewSectionReader(reader, 0, actualSize) + if o.Progress != nil { + body = streaming.NewRequestProgress(shared.NopCloser(body), o.Progress) + } + + uploadBlockBlobOptions := o.getUploadBlockBlobOptions() + resp, err := bb.Upload(ctx, shared.NopCloser(body), uploadBlockBlobOptions) + + return toUploadReaderAtResponseFromUploadResponse(resp), err + } + + var numBlocks = uint16(((actualSize - 1) / o.BlockSize) + 1) + if numBlocks > MaxBlocks { + // prevent any math bugs from attempting to upload too many blocks which will always fail + return uploadFromReaderResponse{}, errors.New("block limit exceeded") + } + + if log.Should(exported.EventUpload) { + urlparts, err := blob.ParseURL(bb.generated().Endpoint()) + if err == nil { + log.Writef(exported.EventUpload, "blob name %s actual size %v block-size %v block-count %v", + urlparts.BlobName, actualSize, o.BlockSize, numBlocks) + } + } + + blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs + progress := int64(0) + progressLock := &sync.Mutex{} + + err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ + OperationName: "uploadFromReader", + TransferSize: actualSize, + ChunkSize: o.BlockSize, + NumChunks: uint64(((actualSize - 1) / o.BlockSize) + 1), + Concurrency: o.Concurrency, + Operation: func(ctx context.Context, offset int64, chunkSize int64) error { + // This function is called once per block. + // It is passed this block's offset within the buffer and its count of bytes + // Prepare to read the proper block/section of the buffer + if chunkSize < o.BlockSize { + // this is the last block. 
its actual size might be less + // than the calculated size due to rounding up of the payload + // size to fit in a whole number of blocks. + chunkSize = (actualSize - offset) + } + var body io.ReadSeeker = io.NewSectionReader(reader, offset, chunkSize) + blockNum := offset / o.BlockSize + if o.Progress != nil { + blockProgress := int64(0) + body = streaming.NewRequestProgress(shared.NopCloser(body), + func(bytesTransferred int64) { + diff := bytesTransferred - blockProgress + blockProgress = bytesTransferred + progressLock.Lock() // 1 goroutine at a time gets progress report + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + + // Block IDs are unique values to avoid issue if 2+ clients are uploading blocks + // at the same time causing PutBlockList to get a mix of blocks from all the clients. + generatedUuid, err := uuid.New() + if err != nil { + return err + } + blockIDList[blockNum] = base64.StdEncoding.EncodeToString([]byte(generatedUuid.String())) + stageBlockOptions := o.getStageBlockOptions() + _, err = bb.StageBlock(ctx, blockIDList[blockNum], shared.NopCloser(body), stageBlockOptions) + return err + }, + }) + if err != nil { + return uploadFromReaderResponse{}, err + } + // All put blocks were successful, call Put Block List to finalize the blob + commitBlockListOptions := o.getCommitBlockListOptions() + resp, err := bb.CommitBlockList(ctx, blockIDList, commitBlockListOptions) + + return toUploadReaderAtResponseFromCommitBlockListResponse(resp), err +} + +// UploadBuffer uploads a buffer in blocks to a block blob. +func (bb *Client) UploadBuffer(ctx context.Context, buffer []byte, o *UploadBufferOptions) (UploadBufferResponse, error) { + uploadOptions := uploadFromReaderOptions{} + if o != nil { + uploadOptions = *o + } + + // If user attempts to pass in their own checksum, errors out. + if uploadOptions.TransactionalValidation != nil && reflect.TypeOf(uploadOptions.TransactionalValidation).Kind() != reflect.Func { + return UploadBufferResponse{}, bloberror.UnsupportedChecksum + } + + return bb.uploadFromReader(ctx, bytes.NewReader(buffer), int64(len(buffer)), &uploadOptions) +} + +// UploadFile uploads a file in blocks to a block blob. +func (bb *Client) UploadFile(ctx context.Context, file *os.File, o *UploadFileOptions) (UploadFileResponse, error) { + stat, err := file.Stat() + if err != nil { + return uploadFromReaderResponse{}, err + } + uploadOptions := uploadFromReaderOptions{} + if o != nil { + uploadOptions = *o + } + + // If user attempts to pass in their own checksum, errors out. + if uploadOptions.TransactionalValidation != nil && reflect.TypeOf(uploadOptions.TransactionalValidation).Kind() != reflect.Func { + return UploadFileResponse{}, bloberror.UnsupportedChecksum + } + + return bb.uploadFromReader(ctx, file, stat.Size(), &uploadOptions) +} + +// UploadStream copies the file held in io.Reader to the Blob at blockBlobClient. +// A Context deadline or cancellation will cause this to error. +func (bb *Client) UploadStream(ctx context.Context, body io.Reader, o *UploadStreamOptions) (UploadStreamResponse, error) { + if o == nil { + o = &UploadStreamOptions{} + } + + // If user attempts to pass in their own checksum, errors out. 
+ if o.TransactionalValidation != nil && reflect.TypeOf(o.TransactionalValidation).Kind() != reflect.Func { + return UploadStreamResponse{}, bloberror.UnsupportedChecksum + } + + result, err := copyFromReader(ctx, body, bb, *o, shared.NewMMBPool) + if err != nil { + return CommitBlockListResponse{}, err + } + + return result, nil +} + +// Concurrent Download Functions ----------------------------------------------------------------------------------------- + +// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. +func (bb *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) { + return bb.BlobClient().DownloadStream(ctx, o) +} + +// DownloadBuffer downloads an Azure blob to a buffer with parallel. +func (bb *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) { + return bb.BlobClient().DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o) +} + +// DownloadFile downloads an Azure blob to a local file. +// The file would be truncated if the size doesn't match. +func (bb *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) { + return bb.BlobClient().DownloadFile(ctx, file, o) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go new file mode 100644 index 0000000000..ce3a5d8de3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go @@ -0,0 +1,52 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + +const ( + // CountToEnd specifies the end of the file. + CountToEnd = 0 + + _1MiB = 1024 * 1024 + + // MaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload. + MaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB + + // MaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock. + MaxStageBlockBytes = 4000 * 1024 * 1024 // 4GB + + // MaxBlocks indicates the maximum number of blocks allowed in a block blob. + MaxBlocks = 50000 +) + +// BlockListType defines values for BlockListType +type BlockListType = generated.BlockListType + +const ( + BlockListTypeCommitted BlockListType = generated.BlockListTypeCommitted + BlockListTypeUncommitted BlockListType = generated.BlockListTypeUncommitted + BlockListTypeAll BlockListType = generated.BlockListTypeAll +) + +// PossibleBlockListTypeValues returns the possible values for the BlockListType const type. +func PossibleBlockListTypeValues() []BlockListType { + return generated.PossibleBlockListTypeValues() +} + +// BlobCopySourceTags - can be 'COPY' or 'REPLACE' +type BlobCopySourceTags = generated.BlobCopySourceTags + +const ( + BlobCopySourceTagsCopy = generated.BlobCopySourceTagsCOPY + BlobCopySourceTagsReplace = generated.BlobCopySourceTagsREPLACE +) + +// PossibleBlobCopySourceTagsValues returns the possible values for the BlobCopySourceTags const type. 
+func PossibleBlobCopySourceTagsValues() []BlobCopySourceTags { + return generated.PossibleBlobCopySourceTagsValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go new file mode 100644 index 0000000000..453d569e5d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go @@ -0,0 +1,411 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// Type Declarations --------------------------------------------------------------------- + +// Block - Represents a single block in a block blob. It describes the block's ID and size. +type Block = generated.Block + +// BlockList - can be uncommitted or committed blocks (committed/uncommitted) +type BlockList = generated.BlockList + +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// UploadOptions contains the optional parameters for the Client.Upload method. +type UploadOptions struct { + // Optional. Used to set blob tags in various blob operations. + Tags map[string]string + + // Optional. Specifies a user-defined name-value pair associated with the blob. + Metadata map[string]*string + + // Optional. Indicates the tier to be set on the blob. + Tier *blob.AccessTier + + // TransactionalValidation specifies the transfer validation type to use. + // The default is nil (no transfer validation). + TransactionalValidation blob.TransferValidationType + + HTTPHeaders *blob.HTTPHeaders + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo + AccessConditions *blob.AccessConditions + LegalHold *bool + ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting + ImmutabilityPolicyExpiryTime *time.Time + + // Deprecated: TransactionalContentMD5 can be set by using TransactionalValidation instead + TransactionalContentMD5 []byte +} + +func (o *UploadOptions) format() (*generated.BlockBlobClientUploadOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, + *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + basics := generated.BlockBlobClientUploadOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), + Metadata: o.Metadata, + Tier: o.Tier, + TransactionalContentMD5: o.TransactionalContentMD5, + LegalHold: o.LegalHold, + ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, + ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiryTime, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, o.HTTPHeaders, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UploadBlobFromURLOptions contains the optional parameters for the Client.UploadBlobFromURL method. 
+type UploadBlobFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + Tags map[string]string + + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Optional, default is true. Indicates if properties from the source blob should be copied. + CopySourceBlobProperties *bool + + // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. + CopySourceTags *BlobCopySourceTags + + // Optional. Specifies a user-defined name-value pair associated with the blob. + Metadata map[string]*string + + // Optional. Specifies the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Optional. Indicates the tier to be set on the blob. + Tier *blob.AccessTier + + // Additional optional headers + HTTPHeaders *blob.HTTPHeaders + AccessConditions *blob.AccessConditions + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo + SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions +} + +func (o *UploadBlobFromURLOptions) format() (*generated.BlockBlobClientPutBlobFromURLOptions, *generated.BlobHTTPHeaders, + *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions, + *generated.SourceModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil, nil + } + + options := generated.BlockBlobClientPutBlobFromURLOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), + CopySourceAuthorization: o.CopySourceAuthorization, + CopySourceBlobProperties: o.CopySourceBlobProperties, + CopySourceTags: o.CopySourceTags, + Metadata: o.Metadata, + SourceContentMD5: o.SourceContentMD5, + Tier: o.Tier, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &options, o.HTTPHeaders, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions, o.SourceModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// StageBlockOptions contains the optional parameters for the Client.StageBlock method. +type StageBlockOptions struct { + CPKInfo *blob.CPKInfo + + CPKScopeInfo *blob.CPKScopeInfo + + LeaseAccessConditions *blob.LeaseAccessConditions + + // TransactionalValidation specifies the transfer validation type to use. + // The default is nil (no transfer validation). + TransactionalValidation blob.TransferValidationType +} + +// StageBlockOptions contains the optional parameters for the Client.StageBlock method. +func (o *StageBlockOptions) format() (*generated.BlockBlobClientStageBlockOptions, *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo) { + if o == nil { + return nil, nil, nil, nil + } + + return &generated.BlockBlobClientStageBlockOptions{}, o.LeaseAccessConditions, o.CPKInfo, o.CPKScopeInfo +} + +// --------------------------------------------------------------------------------------------------------------------- + +// StageBlockFromURLOptions contains the optional parameters for the Client.StageBlockFromURL method. +type StageBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. 
+ CopySourceAuthorization *string + + LeaseAccessConditions *blob.LeaseAccessConditions + + SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions + + // SourceContentValidation contains the validation mechanism used on the range of bytes read from the source. + SourceContentValidation blob.SourceContentValidationType + + // Range specifies a range of bytes. The default value is all bytes. + Range blob.HTTPRange + + CPKInfo *blob.CPKInfo + + CPKScopeInfo *blob.CPKScopeInfo +} + +func (o *StageBlockFromURLOptions) format() (*generated.BlockBlobClientStageBlockFromURLOptions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.LeaseAccessConditions, *generated.SourceModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil + } + + options := &generated.BlockBlobClientStageBlockFromURLOptions{ + CopySourceAuthorization: o.CopySourceAuthorization, + SourceRange: exported.FormatHTTPRange(o.Range), + } + + if o.SourceContentValidation != nil { + o.SourceContentValidation.Apply(options) + } + + return options, o.CPKInfo, o.CPKScopeInfo, o.LeaseAccessConditions, o.SourceModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CommitBlockListOptions contains the optional parameters for Client.CommitBlockList method. +type CommitBlockListOptions struct { + Tags map[string]string + Metadata map[string]*string + RequestID *string + Tier *blob.AccessTier + Timeout *int32 + HTTPHeaders *blob.HTTPHeaders + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo + AccessConditions *blob.AccessConditions + LegalHold *bool + ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting + ImmutabilityPolicyExpiryTime *time.Time + + // Deprecated: TransactionalContentCRC64 cannot be generated + TransactionalContentCRC64 []byte + + // Deprecated: TransactionalContentMD5 cannot be generated + TransactionalContentMD5 []byte +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetBlockListOptions contains the optional parameters for the Client.GetBlockList method. +type GetBlockListOptions struct { + Snapshot *string + AccessConditions *blob.AccessConditions +} + +func (o *GetBlockListOptions) format() (*generated.BlockBlobClientGetBlockListOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &generated.BlockBlobClientGetBlockListOptions{Snapshot: o.Snapshot}, leaseAccessConditions, modifiedAccessConditions +} + +// ------------------------------------------------------------ + +// uploadFromReaderOptions identifies options used by the UploadBuffer and UploadFile functions. +type uploadFromReaderOptions struct { + // BlockSize specifies the block size to use; the default (and maximum size) is MaxStageBlockBytes. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are sent to the BlockBlobClient. + // Note that the progress reporting is not always increasing; it can go down when retrying a request. + Progress func(bytesTransferred int64) + + // HTTPHeaders indicates the HTTP headers to be associated with the blob. + HTTPHeaders *blob.HTTPHeaders + + // Metadata indicates the metadata to be associated with the blob when PutBlockList is called. 
+ Metadata map[string]*string + + // AccessConditions indicates the access conditions for the block blob. + AccessConditions *blob.AccessConditions + + // AccessTier indicates the tier of blob + AccessTier *blob.AccessTier + + // BlobTags + Tags map[string]string + + // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo + + // Concurrency indicates the maximum number of blocks to upload in parallel (0=default) + Concurrency uint16 + + TransactionalValidation blob.TransferValidationType + + // Deprecated: TransactionalContentCRC64 cannot be generated at block level + TransactionalContentCRC64 uint64 + + // Deprecated: TransactionalContentMD5 cannot be generated at block level + TransactionalContentMD5 []byte +} + +// UploadBufferOptions provides set of configurations for UploadBuffer operation. +type UploadBufferOptions = uploadFromReaderOptions + +// UploadFileOptions provides set of configurations for UploadFile operation. +type UploadFileOptions = uploadFromReaderOptions + +func (o *uploadFromReaderOptions) getStageBlockOptions() *StageBlockOptions { + leaseAccessConditions, _ := exported.FormatBlobAccessConditions(o.AccessConditions) + return &StageBlockOptions{ + CPKInfo: o.CPKInfo, + CPKScopeInfo: o.CPKScopeInfo, + LeaseAccessConditions: leaseAccessConditions, + + TransactionalValidation: o.TransactionalValidation, + } +} + +func (o *uploadFromReaderOptions) getUploadBlockBlobOptions() *UploadOptions { + return &UploadOptions{ + Tags: o.Tags, + Metadata: o.Metadata, + Tier: o.AccessTier, + HTTPHeaders: o.HTTPHeaders, + AccessConditions: o.AccessConditions, + CPKInfo: o.CPKInfo, + CPKScopeInfo: o.CPKScopeInfo, + } +} + +func (o *uploadFromReaderOptions) getCommitBlockListOptions() *CommitBlockListOptions { + return &CommitBlockListOptions{ + Tags: o.Tags, + Metadata: o.Metadata, + Tier: o.AccessTier, + HTTPHeaders: o.HTTPHeaders, + CPKInfo: o.CPKInfo, + CPKScopeInfo: o.CPKScopeInfo, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UploadStreamOptions provides set of configurations for UploadStream operation. +type UploadStreamOptions struct { + // BlockSize defines the size of the buffer used during upload. The default and minimum value is 1 MiB. + BlockSize int64 + + // Concurrency defines the max number of concurrent uploads to be performed to upload the file. + // Each concurrent upload will create a buffer of size BlockSize. The default value is one. 
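+	//
+	// Editor's illustrative sketch (not part of the upstream vendored source): BlockSize and
+	// Concurrency are typically tuned together; assuming bb is a *blockblob.Client and r an
+	// io.Reader, a caller might write:
+	//
+	//	_, err := bb.UploadStream(ctx, r, &blockblob.UploadStreamOptions{
+	//		BlockSize:   4 * 1024 * 1024, // stage 4 MiB blocks
+	//		Concurrency: 8,               // keep up to 8 blocks in flight
+	//	})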
+ Concurrency int + + TransactionalValidation blob.TransferValidationType + + HTTPHeaders *blob.HTTPHeaders + Metadata map[string]*string + AccessConditions *blob.AccessConditions + AccessTier *blob.AccessTier + Tags map[string]string + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo +} + +func (u *UploadStreamOptions) setDefaults() { + if u.Concurrency == 0 { + u.Concurrency = 1 + } + + if u.BlockSize < _1MiB { + u.BlockSize = _1MiB + } +} + +func (u *UploadStreamOptions) getStageBlockOptions() *StageBlockOptions { + if u == nil { + return nil + } + + leaseAccessConditions, _ := exported.FormatBlobAccessConditions(u.AccessConditions) + return &StageBlockOptions{ + TransactionalValidation: u.TransactionalValidation, + CPKInfo: u.CPKInfo, + CPKScopeInfo: u.CPKScopeInfo, + LeaseAccessConditions: leaseAccessConditions, + } +} + +func (u *UploadStreamOptions) getCommitBlockListOptions() *CommitBlockListOptions { + if u == nil { + return nil + } + + return &CommitBlockListOptions{ + Tags: u.Tags, + Metadata: u.Metadata, + Tier: u.AccessTier, + HTTPHeaders: u.HTTPHeaders, + CPKInfo: u.CPKInfo, + CPKScopeInfo: u.CPKScopeInfo, + AccessConditions: u.AccessConditions, + } +} + +func (u *UploadStreamOptions) getUploadOptions() *UploadOptions { + if u == nil { + return nil + } + + return &UploadOptions{ + Tags: u.Tags, + Metadata: u.Metadata, + Tier: u.AccessTier, + HTTPHeaders: u.HTTPHeaders, + CPKInfo: u.CPKInfo, + CPKScopeInfo: u.CPKScopeInfo, + AccessConditions: u.AccessConditions, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ExpiryType defines values for ExpiryType. +type ExpiryType = exported.ExpiryType + +// ExpiryTypeAbsolute defines the absolute time for the blob expiry. +type ExpiryTypeAbsolute = exported.ExpiryTypeAbsolute + +// ExpiryTypeRelativeToNow defines the duration relative to now for the blob expiry. +type ExpiryTypeRelativeToNow = exported.ExpiryTypeRelativeToNow + +// ExpiryTypeRelativeToCreation defines the duration relative to creation for the blob expiry. +type ExpiryTypeRelativeToCreation = exported.ExpiryTypeRelativeToCreation + +// ExpiryTypeNever defines that the blob will be set to never expire. +type ExpiryTypeNever = exported.ExpiryTypeNever + +// SetExpiryOptions contains the optional parameters for the Client.SetExpiry method. +type SetExpiryOptions = exported.SetExpiryOptions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go new file mode 100644 index 0000000000..917f718097 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go @@ -0,0 +1,117 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// UploadResponse contains the response from method Client.Upload. +type UploadResponse = generated.BlockBlobClientUploadResponse + +// UploadBlobFromURLResponse contains the response from the method Client.UploadBlobFromURL +type UploadBlobFromURLResponse = generated.BlockBlobClientPutBlobFromURLResponse + +// StageBlockResponse contains the response from method Client.StageBlock. 
+type StageBlockResponse = generated.BlockBlobClientStageBlockResponse + +// CommitBlockListResponse contains the response from method Client.CommitBlockList. +type CommitBlockListResponse = generated.BlockBlobClientCommitBlockListResponse + +// StageBlockFromURLResponse contains the response from method Client.StageBlockFromURL. +type StageBlockFromURLResponse = generated.BlockBlobClientStageBlockFromURLResponse + +// GetBlockListResponse contains the response from method Client.GetBlockList. +type GetBlockListResponse = generated.BlockBlobClientGetBlockListResponse + +// uploadFromReaderResponse contains the response from method Client.UploadBuffer/Client.UploadFile. +type uploadFromReaderResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + // Will be a part of response only if uploading data >= internal.MaxUploadBlobBytes (= 256 * 1024 * 1024 // 256MB) + ContentCRC64 []byte +} + +func toUploadReaderAtResponseFromUploadResponse(resp UploadResponse) uploadFromReaderResponse { + return uploadFromReaderResponse{ + ClientRequestID: resp.ClientRequestID, + ContentMD5: resp.ContentMD5, + Date: resp.Date, + ETag: resp.ETag, + EncryptionKeySHA256: resp.EncryptionKeySHA256, + EncryptionScope: resp.EncryptionScope, + IsServerEncrypted: resp.IsServerEncrypted, + LastModified: resp.LastModified, + RequestID: resp.RequestID, + Version: resp.Version, + VersionID: resp.VersionID, + } +} + +func toUploadReaderAtResponseFromCommitBlockListResponse(resp CommitBlockListResponse) uploadFromReaderResponse { + return uploadFromReaderResponse{ + ClientRequestID: resp.ClientRequestID, + ContentMD5: resp.ContentMD5, + Date: resp.Date, + ETag: resp.ETag, + EncryptionKeySHA256: resp.EncryptionKeySHA256, + EncryptionScope: resp.EncryptionScope, + IsServerEncrypted: resp.IsServerEncrypted, + LastModified: resp.LastModified, + RequestID: resp.RequestID, + Version: resp.Version, + VersionID: resp.VersionID, + ContentCRC64: resp.ContentCRC64, + } +} + +// UploadFileResponse contains the response from method Client.UploadBuffer/Client.UploadFile. 
+type UploadFileResponse = uploadFromReaderResponse + +// UploadBufferResponse contains the response from method Client.UploadBuffer/Client.UploadFile. +type UploadBufferResponse = uploadFromReaderResponse + +// UploadStreamResponse contains the response from method Client.CommitBlockList. +type UploadStreamResponse = CommitBlockListResponse + +// SetExpiryResponse contains the response from method Client.SetExpiry. +type SetExpiryResponse = generated.BlobClientSetExpiryResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go new file mode 100644 index 0000000000..073de855b6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go @@ -0,0 +1,129 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package base + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "strings" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions + + // Audience to use when requesting tokens for Azure Active Directory authentication. + // Only has an effect when credential is of type TokenCredential. The value could be + // https://storage.azure.com/ (default) or https://.blob.core.windows.net. + Audience string +} + +type Client[T any] struct { + inner *T + credential any + options *ClientOptions +} + +func InnerClient[T any](client *Client[T]) *T { + return client.inner +} + +func SharedKey[T any](client *Client[T]) *exported.SharedKeyCredential { + switch cred := client.credential.(type) { + case *exported.SharedKeyCredential: + return cred + default: + return nil + } +} + +func Credential[T any](client *Client[T]) any { + return client.credential +} + +func GetClientOptions[T any](client *Client[T]) *ClientOptions { + return client.options +} + +func GetAudience(clOpts *ClientOptions) string { + if clOpts == nil || len(strings.TrimSpace(clOpts.Audience)) == 0 { + return shared.TokenScope + } else { + return strings.TrimRight(clOpts.Audience, "/") + "/.default" + } +} + +func NewClient[T any](inner *T) *Client[T] { + return &Client[T]{inner: inner} +} + +func NewServiceClient(containerURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.ServiceClient] { + return &Client[generated.ServiceClient]{ + inner: generated.NewServiceClient(containerURL, azClient), + credential: credential, + options: options, + } +} + +func NewContainerClient(containerURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.ContainerClient] { + return &Client[generated.ContainerClient]{ + inner: generated.NewContainerClient(containerURL, azClient), + credential: credential, + options: options, + } +} + +func NewBlobClient(blobURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.BlobClient] { + return &Client[generated.BlobClient]{ + inner: generated.NewBlobClient(blobURL, azClient), + credential: credential, + options: options, + } +} + +type CompositeClient[T, U any] struct { + 
innerT *T + innerU *U + sharedKey *exported.SharedKeyCredential +} + +func InnerClients[T, U any](client *CompositeClient[T, U]) (*Client[T], *U) { + return &Client[T]{ + inner: client.innerT, + credential: client.sharedKey, + }, client.innerU +} + +func NewAppendBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.AppendBlobClient] { + return &CompositeClient[generated.BlobClient, generated.AppendBlobClient]{ + innerT: generated.NewBlobClient(blobURL, azClient), + innerU: generated.NewAppendBlobClient(blobURL, azClient), + sharedKey: sharedKey, + } +} + +func NewBlockBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.BlockBlobClient] { + return &CompositeClient[generated.BlobClient, generated.BlockBlobClient]{ + innerT: generated.NewBlobClient(blobURL, azClient), + innerU: generated.NewBlockBlobClient(blobURL, azClient), + sharedKey: sharedKey, + } +} + +func NewPageBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.PageBlobClient] { + return &CompositeClient[generated.BlobClient, generated.PageBlobClient]{ + innerT: generated.NewBlobClient(blobURL, azClient), + innerU: generated.NewPageBlobClient(blobURL, azClient), + sharedKey: sharedKey, + } +} + +func SharedKeyComposite[T, U any](client *CompositeClient[T, U]) *exported.SharedKeyCredential { + return client.sharedKey +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go new file mode 100644 index 0000000000..96d188fa56 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go @@ -0,0 +1,43 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + +const SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00" + +// ContainerAccessConditions identifies container-specific access conditions which you optionally set. +type ContainerAccessConditions struct { + ModifiedAccessConditions *ModifiedAccessConditions + LeaseAccessConditions *LeaseAccessConditions +} + +func FormatContainerAccessConditions(b *ContainerAccessConditions) (*LeaseAccessConditions, *ModifiedAccessConditions) { + if b == nil { + return nil, nil + } + return b.LeaseAccessConditions, b.ModifiedAccessConditions +} + +// BlobAccessConditions identifies blob-specific access conditions which you optionally set. +type BlobAccessConditions struct { + LeaseAccessConditions *LeaseAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions +} + +func FormatBlobAccessConditions(b *BlobAccessConditions) (*LeaseAccessConditions, *ModifiedAccessConditions) { + if b == nil { + return nil, nil + } + return b.LeaseAccessConditions, b.ModifiedAccessConditions +} + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = generated.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. 
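+//
+// Editor's illustrative sketch (not part of the upstream vendored source): callers in this module
+// typically bundle both condition types in a BlobAccessConditions and let FormatBlobAccessConditions
+// split them for the generated client; leaseID is a hypothetical *string here:
+//
+//	lac, mac := FormatBlobAccessConditions(&BlobAccessConditions{
+//		LeaseAccessConditions: &LeaseAccessConditions{LeaseID: leaseID},
+//	})
+//	_, _ = lac, mac // handed through to the generated *Client options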
+type ModifiedAccessConditions = generated.ModifiedAccessConditions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_policy.go new file mode 100644 index 0000000000..14c293cf65 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_policy.go @@ -0,0 +1,67 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "bytes" + "fmt" +) + +// AccessPolicyPermission type simplifies creating the permissions string for a container's access policy. +// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. +type AccessPolicyPermission struct { + Read, Add, Create, Write, Delete, List bool +} + +// String produces the access policy permission string for an Azure Storage container. +// Call this method to set AccessPolicy's Permission field. +func (p *AccessPolicyPermission) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.List { + b.WriteRune('l') + } + return b.String() +} + +// Parse initializes the AccessPolicyPermission's fields from a string. +func (p *AccessPolicyPermission) Parse(s string) error { + *p = AccessPolicyPermission{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'l': + p.List = true + default: + return fmt.Errorf("invalid permission: '%v'", r) + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go new file mode 100644 index 0000000000..c26c62aa88 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go @@ -0,0 +1,280 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "mime" + "mime/multipart" + "net/http" + "net/textproto" + "strconv" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +const ( + batchIdPrefix = "batch_" + httpVersion = "HTTP/1.1" + httpNewline = "\r\n" +) + +// createBatchID is used for creating a new batch id which is used as batch boundary in the request body +func createBatchID() (string, error) { + batchID, err := uuid.New() + if err != nil { + return "", err + } + + return batchIdPrefix + batchID.String(), nil +} + +// buildSubRequest is used for building the sub-request. 
Example: +// DELETE /container0/blob0 HTTP/1.1 +// x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT +// Authorization: SharedKey account: +// Content-Length: 0 +func buildSubRequest(req *policy.Request) []byte { + var batchSubRequest strings.Builder + blobPath := req.Raw().URL.EscapedPath() + if len(req.Raw().URL.RawQuery) > 0 { + blobPath += "?" + req.Raw().URL.RawQuery + } + + batchSubRequest.WriteString(fmt.Sprintf("%s %s %s%s", req.Raw().Method, blobPath, httpVersion, httpNewline)) + + for k, v := range req.Raw().Header { + if strings.EqualFold(k, shared.HeaderXmsVersion) { + continue + } + if len(v) > 0 { + batchSubRequest.WriteString(fmt.Sprintf("%v: %v%v", k, v[0], httpNewline)) + } + } + + batchSubRequest.WriteString(httpNewline) + return []byte(batchSubRequest.String()) +} + +// CreateBatchRequest creates a new batch request using the sub-requests present in the BlobBatchBuilder. +// +// Example of a sub-request in the batch request body: +// +// --batch_357de4f7-6d0b-4e02-8cd2-6361411a9525 +// Content-Type: application/http +// Content-Transfer-Encoding: binary +// Content-ID: 0 +// +// DELETE /container0/blob0 HTTP/1.1 +// x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT +// Authorization: SharedKey account: +// Content-Length: 0 +func CreateBatchRequest(bb *BlobBatchBuilder) ([]byte, string, error) { + batchID, err := createBatchID() + if err != nil { + return nil, "", err + } + + // Create a new multipart buffer + reqBody := &bytes.Buffer{} + writer := multipart.NewWriter(reqBody) + + // Set the boundary + err = writer.SetBoundary(batchID) + if err != nil { + return nil, "", err + } + + partHeaders := make(textproto.MIMEHeader) + partHeaders["Content-Type"] = []string{"application/http"} + partHeaders["Content-Transfer-Encoding"] = []string{"binary"} + var partWriter io.Writer + + for i, req := range bb.SubRequests { + if bb.AuthPolicy != nil { + _, err := bb.AuthPolicy.Do(req) + if err != nil && !strings.EqualFold(err.Error(), "no more policies") { + if log.Should(EventSubmitBatch) { + log.Writef(EventSubmitBatch, "failed to authorize sub-request for %v.\nError: %v", req.Raw().URL.Path, err.Error()) + } + return nil, "", err + } + } + + partHeaders["Content-ID"] = []string{fmt.Sprintf("%v", i)} + partWriter, err = writer.CreatePart(partHeaders) + if err != nil { + return nil, "", err + } + + _, err = partWriter.Write(buildSubRequest(req)) + if err != nil { + return nil, "", err + } + } + + // Close the multipart writer + err = writer.Close() + if err != nil { + return nil, "", err + } + + return reqBody.Bytes(), batchID, nil +} + +// UpdateSubRequestHeaders updates the sub-request headers. +// Removes x-ms-version header. +func UpdateSubRequestHeaders(req *policy.Request) { + // remove x-ms-version header from the request header + for k := range req.Raw().Header { + if strings.EqualFold(k, shared.HeaderXmsVersion) { + delete(req.Raw().Header, k) + } + } +} + +// BatchResponseItem contains the response for the individual sub-requests. 
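+//
+// Editor's illustrative sketch (not part of the upstream vendored source): items returned by
+// ParseBlobBatchResponse are usually inspected per sub-request via their Error field, e.g.:
+//
+//	for _, item := range items { // items is a hypothetical []*BatchResponseItem
+//		if item.Error != nil {
+//			// the sub-request for *item.BlobName failed; item.Error wraps the HTTP response
+//		}
+//	}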
+type BatchResponseItem struct { + ContentID *int + ContainerName *string + BlobName *string + RequestID *string + Version *string + Error error // nil error indicates that the batch sub-request operation is successful +} + +func getResponseBoundary(contentType *string) (string, error) { + if contentType == nil { + return "", fmt.Errorf("Content-Type returned in SubmitBatch response is nil") + } + + _, params, err := mime.ParseMediaType(*contentType) + if err != nil { + return "", err + } + + if val, ok := params["boundary"]; ok { + return val, nil + } else { + return "", fmt.Errorf("batch boundary not present in Content-Type header of the SubmitBatch response.\nContent-Type: %v", *contentType) + } +} + +func getContentID(part *multipart.Part) (*int, error) { + contentID := part.Header.Get("Content-ID") + if contentID == "" { + return nil, nil + } + + val, err := strconv.Atoi(strings.TrimSpace(contentID)) + if err != nil { + return nil, err + } + return &val, nil +} + +func getResponseHeader(key string, resp *http.Response) *string { + val := resp.Header.Get(key) + if val == "" { + return nil + } + return &val +} + +// ParseBlobBatchResponse is used for parsing the batch response body into individual sub-responses for each item in the batch. +func ParseBlobBatchResponse(respBody io.ReadCloser, contentType *string, subRequests []*policy.Request) ([]*BatchResponseItem, error) { + boundary, err := getResponseBoundary(contentType) + if err != nil { + return nil, err + } + + respReader := multipart.NewReader(respBody, boundary) + var responses []*BatchResponseItem + + for { + part, err := respReader.NextPart() + if errors.Is(err, io.EOF) { + break + } else if err != nil { + return nil, err + } + + batchSubResponse := &BatchResponseItem{} + batchSubResponse.ContentID, err = getContentID(part) + if err != nil { + return nil, err + } + + if batchSubResponse.ContentID != nil { + path := strings.Trim(subRequests[*batchSubResponse.ContentID].Raw().URL.Path, "/") + p := strings.Split(path, "/") + batchSubResponse.ContainerName = to.Ptr(p[0]) + batchSubResponse.BlobName = to.Ptr(strings.Join(p[1:], "/")) + } + + respBytes, err := io.ReadAll(part) + if err != nil { + return nil, err + } + respBytes = append(respBytes, byte('\n')) + buf := bytes.NewBuffer(respBytes) + resp, err := http.ReadResponse(bufio.NewReader(buf), nil) + // sub-response parsing error + if err != nil { + return nil, err + } + + batchSubResponse.RequestID = getResponseHeader(shared.HeaderXmsRequestID, resp) + batchSubResponse.Version = getResponseHeader(shared.HeaderXmsVersion, resp) + + // sub-response failure + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + if len(responses) == 0 && batchSubResponse.ContentID == nil { + // this case can happen when the parent request fails. + // For example, batch request having more than 256 sub-requests. 
+ return nil, fmt.Errorf("%v", string(respBytes)) + } + + resp.Request = subRequests[*batchSubResponse.ContentID].Raw() + batchSubResponse.Error = runtime.NewResponseError(resp) + } + + responses = append(responses, batchSubResponse) + } + + if len(responses) != len(subRequests) { + return nil, fmt.Errorf("expected %v responses, got %v for the batch ID: %v", len(subRequests), len(responses), boundary) + } + + return responses, nil +} + +// not exported but used for batch request creation + +// BlobBatchBuilder is used for creating the blob batch request +type BlobBatchBuilder struct { + AuthPolicy policy.Policy + SubRequests []*policy.Request +} + +// BlobBatchOperationType defines the operation of the blob batch sub-requests. +type BlobBatchOperationType string + +const ( + BatchDeleteOperationType BlobBatchOperationType = "delete" + BatchSetTierOperationType BlobBatchOperationType = "set tier" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go new file mode 100644 index 0000000000..d0355727c9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go @@ -0,0 +1,33 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "fmt" + "strconv" +) + +// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and +// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange +// which has an offset and zero value count indicates from the offset to the resource's end. +type HTTPRange struct { + Offset int64 + Count int64 +} + +// FormatHTTPRange converts an HTTPRange to its string format. +func FormatHTTPRange(r HTTPRange) *string { + if r.Offset == 0 && r.Count == 0 { + return nil // No specified range + } + endOffset := "" // if count == CountToEnd (0) + if r.Count > 0 { + endOffset = strconv.FormatInt((r.Offset+r.Count)-1, 10) + } + dataRange := fmt.Sprintf("bytes=%v-%s", r.Offset, endOffset) + return &dataRange +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go new file mode 100644 index 0000000000..d775fb5c88 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go @@ -0,0 +1,20 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// NOTE: these are publicly exported via type-aliasing in azblob/log.go +const ( + // EventUpload is used when we compute number of blocks to upload and size of each block. + EventUpload log.Event = "azblob.Upload" + + // EventSubmitBatch is used for logging events related to submit blob batch operation. 
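+	//
+	// Editor's illustrative sketch (not part of the upstream vendored source): consumers enable
+	// these events through the azcore log facade, shown here with its conventional azlog alias
+	// and the azblob-level aliases mentioned in the note above:
+	//
+	//	azlog.SetEvents(azblob.EventUpload, azblob.EventSubmitBatch)
+	//	azlog.SetListener(func(ev azlog.Event, msg string) { fmt.Println(ev, msg) })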
+ EventSubmitBatch log.Event = "azblob.SubmitBatch" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/set_expiry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/set_expiry.go new file mode 100644 index 0000000000..71473decab --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/set_expiry.go @@ -0,0 +1,71 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "net/http" + "strconv" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// ExpiryType defines values for ExpiryType +type ExpiryType interface { + Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) + notPubliclyImplementable() +} + +// ExpiryTypeAbsolute defines the absolute time for the blob expiry +type ExpiryTypeAbsolute time.Time + +// ExpiryTypeRelativeToNow defines the duration relative to now for the blob expiry +type ExpiryTypeRelativeToNow time.Duration + +// ExpiryTypeRelativeToCreation defines the duration relative to creation for the blob expiry +type ExpiryTypeRelativeToCreation time.Duration + +// ExpiryTypeNever defines that the blob will be set to never expire +type ExpiryTypeNever struct { + // empty struct since NeverExpire expiry type does not require expiry time +} + +// SetExpiryOptions contains the optional parameters for the Client.SetExpiry method. +type SetExpiryOptions struct { + // placeholder for future options +} + +func (e ExpiryTypeAbsolute) Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) { + return generated.ExpiryOptionsAbsolute, &generated.BlobClientSetExpiryOptions{ + ExpiresOn: to.Ptr(time.Time(e).UTC().Format(http.TimeFormat)), + } +} + +func (e ExpiryTypeAbsolute) notPubliclyImplementable() {} + +func (e ExpiryTypeRelativeToNow) Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) { + return generated.ExpiryOptionsRelativeToNow, &generated.BlobClientSetExpiryOptions{ + ExpiresOn: to.Ptr(strconv.FormatInt(time.Duration(e).Milliseconds(), 10)), + } +} + +func (e ExpiryTypeRelativeToNow) notPubliclyImplementable() {} + +func (e ExpiryTypeRelativeToCreation) Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) { + return generated.ExpiryOptionsRelativeToCreation, &generated.BlobClientSetExpiryOptions{ + ExpiresOn: to.Ptr(strconv.FormatInt(time.Duration(e).Milliseconds(), 10)), + } +} + +func (e ExpiryTypeRelativeToCreation) notPubliclyImplementable() {} + +func (e ExpiryTypeNever) Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) { + return generated.ExpiryOptionsNeverExpire, nil +} + +func (e ExpiryTypeNever) notPubliclyImplementable() {} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go new file mode 100644 index 0000000000..b0be323b7b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go @@ -0,0 +1,314 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) 
Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "sort" + "strings" + "sync/atomic" + "time" + + azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName string, accountKey string) (*SharedKeyCredential, error) { + c := SharedKeyCredential{accountName: accountName} + if err := c.SetAccountKey(accountKey); err != nil { + return nil, err + } + return &c, nil +} + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential struct { + // Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only + accountName string + accountKey atomic.Value // []byte +} + +// AccountName returns the Storage account's name. +func (c *SharedKeyCredential) AccountName() string { + return c.accountName +} + +// SetAccountKey replaces the existing account key with the specified account key. +func (c *SharedKeyCredential) SetAccountKey(accountKey string) error { + _bytes, err := base64.StdEncoding.DecodeString(accountKey) + if err != nil { + return fmt.Errorf("decode account key: %w", err) + } + c.accountKey.Store(_bytes) + return nil +} + +// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. 
+func (c *SharedKeyCredential) computeHMACSHA256(message string) (string, error) { + h := hmac.New(sha256.New, c.accountKey.Load().([]byte)) + _, err := h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)), err +} + +func (c *SharedKeyCredential) buildStringToSign(req *http.Request) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + headers := req.Header + contentLength := getHeader(shared.HeaderContentLength, headers) + if contentLength == "0" { + contentLength = "" + } + + canonicalizedResource, err := c.buildCanonicalizedResource(req.URL) + if err != nil { + return "", err + } + + stringToSign := strings.Join([]string{ + req.Method, + getHeader(shared.HeaderContentEncoding, headers), + getHeader(shared.HeaderContentLanguage, headers), + contentLength, + getHeader(shared.HeaderContentMD5, headers), + getHeader(shared.HeaderContentType, headers), + "", // Empty date because x-ms-date is expected (as per web page above) + getHeader(shared.HeaderIfModifiedSince, headers), + getHeader(shared.HeaderIfMatch, headers), + getHeader(shared.HeaderIfNoneMatch, headers), + getHeader(shared.HeaderIfUnmodifiedSince, headers), + getHeader(shared.HeaderRange, headers), + c.buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + return stringToSign, nil +} + +func getHeader(key string, headers map[string][]string) string { + if headers == nil { + return "" + } + if v, ok := headers[key]; ok { + if len(v) > 0 { + return v[0] + } + } + + return "" +} + +func getWeightTables() [][]int { + tableLv0 := [...]int{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71c, 0x0, 0x71f, 0x721, 0x723, 0x725, + 0x0, 0x0, 0x0, 0x72d, 0x803, 0x0, 0x0, 0x733, 0x0, 0xd03, 0xd1a, 0xd1c, 0xd1e, + 0xd20, 0xd22, 0xd24, 0xd26, 0xd28, 0xd2a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0xe02, 0xe09, 0xe0a, 0xe1a, 0xe21, 0xe23, 0xe25, 0xe2c, 0xe32, 0xe35, 0xe36, 0xe48, 0xe51, + 0xe70, 0xe7c, 0xe7e, 0xe89, 0xe8a, 0xe91, 0xe99, 0xe9f, 0xea2, 0xea4, 0xea6, 0xea7, 0xea9, + 0x0, 0x0, 0x0, 0x743, 0x744, 0x748, 0xe02, 0xe09, 0xe0a, 0xe1a, 0xe21, 0xe23, 0xe25, + 0xe2c, 0xe32, 0xe35, 0xe36, 0xe48, 0xe51, 0xe70, 0xe7c, 0xe7e, 0xe89, 0xe8a, 0xe91, 0xe99, + 0xe9f, 0xea2, 0xea4, 0xea6, 0xea7, 0xea9, 0x0, 0x74c, 0x0, 0x750, 0x0, + } + tableLv2 := [...]int{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8012, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8212, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + } + tables := [][]int{tableLv0[:], tableLv2[:]} + return tables +} + +// NewHeaderStringComparer performs a multi-level, weight-based comparison of two strings +func compareHeaders(lhs, rhs string, tables [][]int) int { + currLevel, i, j := 0, 0, 0 + n := len(tables) + lhsLen := len(lhs) + rhsLen := len(rhs) + + for currLevel < n { + if currLevel == (n-1) && i != j { + if i > j { + return -1 + } + if i < j { 
+ return 1 + } + return 0 + } + + var w1, w2 int + + // Check bounds before accessing lhs[i] + if i < lhsLen { + w1 = tables[currLevel][lhs[i]] + } else { + w1 = 0x1 + } + + // Check bounds before accessing rhs[j] + if j < rhsLen { + w2 = tables[currLevel][rhs[j]] + } else { + w2 = 0x1 + } + + if w1 == 0x1 && w2 == 0x1 { + i = 0 + j = 0 + currLevel++ + } else if w1 == w2 { + i++ + j++ + } else if w1 == 0 { + i++ + } else if w2 == 0 { + j++ + } else { + if w1 < w2 { + return -1 + } + if w1 > w2 { + return 1 + } + return 0 + } + } + return 0 +} + +func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) string { + cm := map[string][]string{} + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = v // NOTE: the value must not have any whitespace around it. + } + } + if len(cm) == 0 { + return "" + } + + keys := make([]string, 0, len(cm)) + for key := range cm { + keys = append(keys, key) + } + tables := getWeightTables() + // Sort the keys using the custom comparator + sort.Slice(keys, func(i, j int) bool { + return compareHeaders(keys[i], keys[j], tables) < 0 + }) + ch := bytes.NewBufferString("") + for i, key := range keys { + if i > 0 { + ch.WriteRune('\n') + } + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(strings.Join(cm[key], ",")) + } + return ch.String() +} + +func (c *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + cr := bytes.NewBufferString("/") + cr.WriteString(c.accountName) + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. + // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } else { + // a slash is required to indicate the root path + cr.WriteString("/") + } + + // params is a map[string][]string; param name is key; params values is []string + params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values + if err != nil { + return "", fmt.Errorf("failed to parse query params: %w", err) + } + + if len(params) > 0 { // There is at least 1 query parameter + var paramNames []string // We use this to sort the parameter key names + for paramName := range params { + paramNames = append(paramNames, paramName) // paramNames must be lowercase + } + sort.Strings(paramNames) + + for _, paramName := range paramNames { + paramValues := params[paramName] + sort.Strings(paramValues) + + // Join the sorted key values separated by ',' + // Then prepend "keyName:"; then add this string to the buffer + cr.WriteString("\n" + strings.ToLower(paramName) + ":" + strings.Join(paramValues, ",")) + } + } + return cr.String(), nil +} + +// ComputeHMACSHA256 is a helper for computing the signed string outside of this package. 
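+//
+// Editor's illustrative sketch (not part of the upstream vendored source): callers outside this
+// package (for example, SAS signing code) would invoke it roughly like the following, where
+// accountName, accountKey, and stringToSign are hypothetical inputs:
+//
+//	cred, _ := NewSharedKeyCredential(accountName, accountKey)
+//	signature, err := ComputeHMACSHA256(cred, stringToSign)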
+func ComputeHMACSHA256(cred *SharedKeyCredential, message string) (string, error) { + return cred.computeHMACSHA256(message) +} + +// the following content isn't actually exported but must live +// next to SharedKeyCredential as it uses its unexported methods + +type SharedKeyCredPolicy struct { + cred *SharedKeyCredential +} + +func NewSharedKeyCredPolicy(cred *SharedKeyCredential) *SharedKeyCredPolicy { + return &SharedKeyCredPolicy{cred: cred} +} + +func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) { + // skip adding the authorization header if no SharedKeyCredential was provided. + // this prevents a panic that might be hard to diagnose and allows testing + // against http endpoints that don't require authentication. + if s.cred == nil { + return req.Next() + } + + if d := getHeader(shared.HeaderXmsDate, req.Raw().Header); d == "" { + req.Raw().Header.Set(shared.HeaderXmsDate, time.Now().UTC().Format(http.TimeFormat)) + } + stringToSign, err := s.cred.buildStringToSign(req.Raw()) + if err != nil { + return nil, err + } + signature, err := s.cred.computeHMACSHA256(stringToSign) + if err != nil { + return nil, err + } + authHeader := strings.Join([]string{"SharedKey ", s.cred.AccountName(), ":", signature}, "") + req.Raw().Header.Set(shared.HeaderAuthorization, authHeader) + + response, err := req.Next() + if err != nil && response != nil && response.StatusCode == http.StatusForbidden { + // Service failed to authenticate request, log it + log.Write(azlog.EventResponse, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n") + } + return response, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/transfer_validation_option.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/transfer_validation_option.go new file mode 100644 index 0000000000..f3e571fa6a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/transfer_validation_option.go @@ -0,0 +1,67 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "bytes" + "encoding/binary" + "hash/crc64" + "io" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// TransferValidationType abstracts the various mechanisms used to verify a transfer. +type TransferValidationType interface { + Apply(io.ReadSeekCloser, generated.TransactionalContentSetter) (io.ReadSeekCloser, error) + notPubliclyImplementable() +} + +// TransferValidationTypeCRC64 is a TransferValidationType used to provide a precomputed CRC64. +type TransferValidationTypeCRC64 uint64 + +func (c TransferValidationTypeCRC64) Apply(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, uint64(c)) + cfg.SetCRC64(buf) + return rsc, nil +} + +func (TransferValidationTypeCRC64) notPubliclyImplementable() {} + +// TransferValidationTypeComputeCRC64 is a TransferValidationType that indicates a CRC64 should be computed during transfer. 
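+//
+// Editor's illustrative sketch (not part of the upstream vendored source): callers plug the
+// returned value into any options struct exposing a TransferValidationType field, for example
+// blockblob.UploadOptions.TransactionalValidation, assuming the public blob package aliases this
+// constructor as usual:
+//
+//	opts := blockblob.UploadOptions{
+//		TransactionalValidation: blob.TransferValidationTypeComputeCRC64(),
+//	}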
+func TransferValidationTypeComputeCRC64() TransferValidationType { + return transferValidationTypeFn(func(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { + buf, err := io.ReadAll(rsc) + if err != nil { + return nil, err + } + + crc := crc64.Checksum(buf, shared.CRC64Table) + return TransferValidationTypeCRC64(crc).Apply(streaming.NopCloser(bytes.NewReader(buf)), cfg) + }) +} + +// TransferValidationTypeMD5 is a TransferValidationType used to provide a precomputed MD5. +type TransferValidationTypeMD5 []byte + +func (c TransferValidationTypeMD5) Apply(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { + cfg.SetMD5(c) + return rsc, nil +} + +func (TransferValidationTypeMD5) notPubliclyImplementable() {} + +type transferValidationTypeFn func(io.ReadSeekCloser, generated.TransactionalContentSetter) (io.ReadSeekCloser, error) + +func (t transferValidationTypeFn) Apply(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { + return t(rsc, cfg) +} + +func (transferValidationTypeFn) notPubliclyImplementable() {} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go new file mode 100644 index 0000000000..2e2dd16e42 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go @@ -0,0 +1,64 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// NewUserDelegationCredential creates a new UserDelegationCredential using a Storage account's Name and a user delegation Key from it +func NewUserDelegationCredential(accountName string, udk UserDelegationKey) *UserDelegationCredential { + return &UserDelegationCredential{ + accountName: accountName, + userDelegationKey: udk, + } +} + +// UserDelegationKey contains UserDelegationKey. +type UserDelegationKey = generated.UserDelegationKey + +// UserDelegationCredential contains an account's name and its user delegation key. +type UserDelegationCredential struct { + accountName string + userDelegationKey UserDelegationKey +} + +// getAccountName returns the Storage account's Name +func (f *UserDelegationCredential) getAccountName() string { + return f.accountName +} + +// GetAccountName is a helper method for accessing the user delegation key parameters outside this package. +func GetAccountName(udc *UserDelegationCredential) string { + return udc.getAccountName() +} + +// computeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. +func (f *UserDelegationCredential) computeHMACSHA256(message string) (string, error) { + bytes, _ := base64.StdEncoding.DecodeString(*f.userDelegationKey.Value) + h := hmac.New(sha256.New, bytes) + _, err := h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)), err +} + +// ComputeUDCHMACSHA256 is a helper method for computing the signed string outside this package. 
+func ComputeUDCHMACSHA256(udc *UserDelegationCredential, message string) (string, error) { + return udc.computeHMACSHA256(message) +} + +// getUDKParams returns UserDelegationKey +func (f *UserDelegationCredential) getUDKParams() *UserDelegationKey { + return &f.userDelegationKey +} + +// GetUDKParams is a helper method for accessing the user delegation key parameters outside this package. +func GetUDKParams(udc *UserDelegationCredential) *UserDelegationKey { + return udc.getUDKParams() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go new file mode 100644 index 0000000000..3e95fff69a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go @@ -0,0 +1,12 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +const ( + ModuleName = "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + ModuleVersion = "v1.5.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go new file mode 100644 index 0000000000..288df7edda --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go @@ -0,0 +1,32 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) + +func (client *AppendBlobClient) Endpoint() string { + return client.endpoint +} + +func (client *AppendBlobClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewAppendBlobClient creates a new instance of AppendBlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. 
+func NewAppendBlobClient(endpoint string, azClient *azcore.Client) *AppendBlobClient { + client := &AppendBlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md new file mode 100644 index 0000000000..afc96b4d74 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md @@ -0,0 +1,521 @@ +# Code Generation - Azure Blob SDK for Golang + +### Settings + +```yaml +go: true +clear-output-folder: false +version: "^3.0.0" +license-header: MICROSOFT_MIT_NO_VERSION +input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/f6f50c6388fd5836fa142384641b8353a99874ef/specification/storage/data-plane/Microsoft.BlobStorage/stable/2024-08-04/blob.json" +credential-scope: "https://storage.azure.com/.default" +output-folder: ../generated +file-prefix: "zz_" +openapi-type: "data-plane" +verbose: true +security: AzureKey +modelerfour: + group-parameters: false + seal-single-value-enum-by-default: true + lenient-model-deduplication: true +export-clients: true +use: "@autorest/go@4.0.0-preview.65" +``` + +### Add Owner,Group,Permissions,Acl,ResourceType in ListBlob Response +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + $.BlobPropertiesInternal.properties["Owner"] = { + "type" : "string", + }; + $.BlobPropertiesInternal.properties["Group"] = { + "type" : "string", + }; + $.BlobPropertiesInternal.properties["Permissions"] = { + "type" : "string", + }; + $.BlobPropertiesInternal.properties["Acl"] = { + "type" : "string", + }; + $.BlobPropertiesInternal.properties["ResourceType"] = { + "type" : "string", + }; + +``` + +### Add permissions in ListBlobsInclude +``` yaml +directive: +- from: swagger-document + where: $.parameters.ListBlobsInclude + transform: > + $.items.enum.push("permissions"); +``` + +### Updating service version to 2024-11-04 +```yaml +directive: +- from: + - zz_appendblob_client.go + - zz_blob_client.go + - zz_blockblob_client.go + - zz_container_client.go + - zz_pageblob_client.go + - zz_service_client.go + where: $ + transform: >- + return $. + replaceAll(`[]string{"2024-08-04"}`, `[]string{ServiceVersion}`); +``` + +### Fix CRC Response Header in PutBlob response +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"]["/{containerName}/{blob}?BlockBlob"].put.responses["201"].headers + transform: > + $["x-ms-content-crc64"] = { + "x-ms-client-name": "ContentCRC64", + "type": "string", + "format": "byte", + "description": "Returned for a block blob so that the client can check the integrity of message content." + }; +``` + +### Undo breaking change with BlobName +``` yaml +directive: +- from: zz_models.go + where: $ + transform: >- + return $. + replace(/Name\s+\*BlobName/g, `Name *string`); +``` + +### Removing UnmarshalXML for BlobItems to create customer UnmarshalXML function +```yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + $.BlobItemInternal["x-ms-go-omit-serde-methods"] = true; +``` + +### Remove pager methods and export various generated methods in container client + +``` yaml +directive: + - from: zz_container_client.go + where: $ + transform: >- + return $. 
+ replace(/func \(client \*ContainerClient\) NewListBlobFlatSegmentPager\(.+\/\/ listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request/s, `//\n// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request`). + replace(/\(client \*ContainerClient\) listBlobFlatSegmentCreateRequest\(/, `(client *ContainerClient) ListBlobFlatSegmentCreateRequest(`). + replace(/\(client \*ContainerClient\) listBlobFlatSegmentHandleResponse\(/, `(client *ContainerClient) ListBlobFlatSegmentHandleResponse(`); +``` + +### Remove pager methods and export various generated methods in service client + +``` yaml +directive: + - from: zz_service_client.go + where: $ + transform: >- + return $. + replace(/func \(client \*ServiceClient\) NewListContainersSegmentPager\(.+\/\/ listContainersSegmentCreateRequest creates the ListContainersSegment request/s, `//\n// listContainersSegmentCreateRequest creates the ListContainersSegment request`). + replace(/\(client \*ServiceClient\) listContainersSegmentCreateRequest\(/, `(client *ServiceClient) ListContainersSegmentCreateRequest(`). + replace(/\(client \*ServiceClient\) listContainersSegmentHandleResponse\(/, `(client *ServiceClient) ListContainersSegmentHandleResponse(`); +``` + +### Fix BlobMetadata. + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.BlobMetadata["properties"]; + +``` + +### Don't include container name or blob in path - we have direct URIs. + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"] + transform: > + for (const property in $) + { + if (property.includes('/{containerName}/{blob}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName") && false == param['$ref'].endsWith("#/parameters/Blob"))}); + } + else if (property.includes('/{containerName}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName"))}); + } + } +``` + +### Remove DataLake stuff. 
+ +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"] + transform: > + for (const property in $) + { + if (property.includes('filesystem')) + { + delete $[property]; + } + } +``` + +### Remove DataLakeStorageError + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.DataLakeStorageError; +``` + +### Fix 304s + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"]["/{containerName}/{blob}"] + transform: > + $.get.responses["304"] = { + "description": "The condition specified using HTTP conditional header(s) is not met.", + "x-az-response-name": "ConditionNotMetError", + "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } } + }; +``` + +### Fix GeoReplication + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.GeoReplication.properties.Status["x-ms-enum"]; + $.GeoReplication.properties.Status["x-ms-enum"] = { + "name": "BlobGeoReplicationStatus", + "modelAsString": false + }; +``` + +### Fix RehydratePriority + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.RehydratePriority["x-ms-enum"]; + $.RehydratePriority["x-ms-enum"] = { + "name": "RehydratePriority", + "modelAsString": false + }; +``` + +### Fix BlobDeleteType + +``` yaml +directive: +- from: swagger-document + where: $.parameters + transform: > + delete $.BlobDeleteType.enum; + $.BlobDeleteType.enum = [ + "None", + "Permanent" + ]; +``` + +### Fix EncryptionAlgorithm + +``` yaml +directive: +- from: swagger-document + where: $.parameters + transform: > + delete $.EncryptionAlgorithm.enum; + $.EncryptionAlgorithm.enum = [ + "None", + "AES256" + ]; +``` + +### Fix XML string "ObjectReplicationMetadata" to "OrMetadata" + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + $.BlobItemInternal.properties["OrMetadata"] = $.BlobItemInternal.properties["ObjectReplicationMetadata"]; + delete $.BlobItemInternal.properties["ObjectReplicationMetadata"]; +``` + +# Export various createRequest/HandleResponse methods + +``` yaml +directive: +- from: zz_container_client.go + where: $ + transform: >- + return $. + replace(/listBlobHierarchySegmentCreateRequest/g, function(_, s) { return `ListBlobHierarchySegmentCreateRequest` }). + replace(/listBlobHierarchySegmentHandleResponse/g, function(_, s) { return `ListBlobHierarchySegmentHandleResponse` }); + +- from: zz_pageblob_client.go + where: $ + transform: >- + return $. + replace(/getPageRanges(Diff)?CreateRequest/g, function(_, s) { if (s === undefined) { s = '' }; return `GetPageRanges${s}CreateRequest` }). 
+ replace(/getPageRanges(Diff)?HandleResponse/g, function(_, s) { if (s === undefined) { s = '' }; return `GetPageRanges${s}HandleResponse` }); +``` + +### Clean up some const type names so they don't stutter + +``` yaml +directive: +- from: swagger-document + where: $.parameters['BlobDeleteType'] + transform: > + $["x-ms-enum"].name = "DeleteType"; + $["x-ms-client-name"] = "DeleteType"; + +- from: swagger-document + where: $.parameters['BlobExpiryOptions'] + transform: > + $["x-ms-enum"].name = "ExpiryOptions"; + $["x-ms-client-name"].name = "ExpiryOptions"; + +- from: swagger-document + where: $["x-ms-paths"][*].*.responses[*].headers["x-ms-immutability-policy-mode"] + transform: > + $["x-ms-client-name"].name = "ImmutabilityPolicyMode"; + $.enum = [ "Mutable", "Unlocked", "Locked"]; + $["x-ms-enum"] = { "name": "ImmutabilityPolicyMode", "modelAsString": false }; + +- from: swagger-document + where: $.parameters['ImmutabilityPolicyMode'] + transform: > + $["x-ms-enum"].name = "ImmutabilityPolicySetting"; + $["x-ms-client-name"].name = "ImmutabilityPolicySetting"; + +- from: swagger-document + where: $.definitions['BlobPropertiesInternal'] + transform: > + $.properties.ImmutabilityPolicyMode["x-ms-enum"].name = "ImmutabilityPolicyMode"; +``` + +### use azcore.ETag + +``` yaml +directive: +- from: + - zz_models.go + - zz_options.go + where: $ + transform: >- + return $. + replace(/import "time"/, `import (\n\t"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n)`). + replace(/Etag\s+\*string/g, `ETag *azcore.ETag`). + replace(/IfMatch\s+\*string/g, `IfMatch *azcore.ETag`). + replace(/IfNoneMatch\s+\*string/g, `IfNoneMatch *azcore.ETag`). + replace(/SourceIfMatch\s+\*string/g, `SourceIfMatch *azcore.ETag`). + replace(/SourceIfNoneMatch\s+\*string/g, `SourceIfNoneMatch *azcore.ETag`); + +- from: zz_responses.go + where: $ + transform: >- + return $. + replace(/"time"/, `"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"`). + replace(/ETag\s+\*string/g, `ETag *azcore.ETag`); + +- from: + - zz_appendblob_client.go + - zz_blob_client.go + - zz_blockblob_client.go + - zz_container_client.go + - zz_pageblob_client.go + where: $ + transform: >- + return $. + replace(/"github\.com\/Azure\/azure\-sdk\-for\-go\/sdk\/azcore\/policy"/, `"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"`). + replace(/result\.ETag\s+=\s+&val/g, `result.ETag = (*azcore.ETag)(&val)`). + replace(/\*modifiedAccessConditions.IfMatch/g, `string(*modifiedAccessConditions.IfMatch)`). + replace(/\*modifiedAccessConditions.IfNoneMatch/g, `string(*modifiedAccessConditions.IfNoneMatch)`). + replace(/\*sourceModifiedAccessConditions.SourceIfMatch/g, `string(*sourceModifiedAccessConditions.SourceIfMatch)`). + replace(/\*sourceModifiedAccessConditions.SourceIfNoneMatch/g, `string(*sourceModifiedAccessConditions.SourceIfNoneMatch)`); +``` + +### Unsure why this casing changed, but fixing it + +``` yaml +directive: +- from: zz_models.go + where: $ + transform: >- + return $. + replace(/SignedOid\s+\*string/g, `SignedOID *string`). + replace(/SignedTid\s+\*string/g, `SignedTID *string`); +``` + +### Fixing Typo with StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed + +``` yaml +directive: +- from: zz_constants.go + where: $ + transform: >- + return $. 
+ replace(/IncrementalCopyOfEralierVersionSnapshotNotAllowed/g, "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"); +``` + +### Fix up x-ms-content-crc64 header response name + +``` yaml +directive: +- from: swagger-document + where: $.x-ms-paths.*.*.responses.*.headers.x-ms-content-crc64 + transform: > + $["x-ms-client-name"] = "ContentCRC64" +``` + +``` yaml +directive: +- rename-model: + from: BlobItemInternal + to: BlobItem +- rename-model: + from: BlobPropertiesInternal + to: BlobProperties +``` + +### Updating encoding URL, Golang adds '+' which disrupts encoding with service + +``` yaml +directive: + - from: zz_service_client.go + where: $ + transform: >- + return $. + replace(/req.Raw\(\).URL.RawQuery \= reqQP.Encode\(\)/, `req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)`) +``` + +### Change `where` parameter in blob filtering to be required + +``` yaml +directive: +- from: swagger-document + where: $.parameters.FilterBlobsWhere + transform: > + $.required = true; +``` + +### Change `Duration` parameter in leases to be required + +``` yaml +directive: +- from: swagger-document + where: $.parameters.LeaseDuration + transform: > + $.required = true; +``` + +### Change CPK acronym to be all caps + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/Cpk/g, "CPK"); +``` + +### Change CORS acronym to be all caps + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/Cors/g, "CORS"); +``` + +### Change cors xml to be correct + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/xml:"CORS>CORSRule"/g, "xml:\"Cors>CorsRule\""); +``` + +### Fix Content-Type header in submit batch request + +``` yaml +directive: +- from: + - zz_container_client.go + - zz_service_client.go + where: $ + transform: >- + return $. + replace (/req.SetBody\(body\,\s+\"application\/xml\"\)/g, `req.SetBody(body, multipartContentType)`); +``` + +### Fix response status code check in submit batch request + +``` yaml +directive: +- from: zz_service_client.go + where: $ + transform: >- + return $. + replace(/if\s+!runtime\.HasStatusCode\(httpResp,\s+http\.StatusOK\)\s+\{\s+err\s+=\s+runtime\.NewResponseError\(httpResp\)\s+return ServiceClientSubmitBatchResponse\{\}\,\s+err\s+}/g, + `if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {\n\t\terr = runtime.NewResponseError(httpResp)\n\t\treturn ServiceClientSubmitBatchResponse{}, err\n\t}`); +``` + +### Convert time to GMT for If-Modified-Since and If-Unmodified-Since request headers + +``` yaml +directive: +- from: + - zz_container_client.go + - zz_blob_client.go + - zz_appendblob_client.go + - zz_blockblob_client.go + - zz_pageblob_client.go + where: $ + transform: >- + return $. + replace (/req\.Raw\(\)\.Header\[\"If-Modified-Since\"\]\s+=\s+\[\]string\{modifiedAccessConditions\.IfModifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"If-Unmodified-Since\"\]\s+=\s+\[\]string\{modifiedAccessConditions\.IfUnmodifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}`). 
+ replace (/req\.Raw\(\)\.Header\[\"x-ms-source-if-modified-since\"\]\s+=\s+\[\]string\{sourceModifiedAccessConditions\.SourceIfModifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"x-ms-source-if-unmodified-since\"\]\s+=\s+\[\]string\{sourceModifiedAccessConditions\.SourceIfUnmodifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"x-ms-immutability-policy-until-date\"\]\s+=\s+\[\]string\{options\.ImmutabilityPolicyExpiry\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}`); + diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go new file mode 100644 index 0000000000..343073b2e6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go @@ -0,0 +1,44 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "time" +) + +// used to convert times from UTC to GMT before sending across the wire +var gmt = time.FixedZone("GMT", 0) + +func (client *BlobClient) Endpoint() string { + return client.endpoint +} + +func (client *BlobClient) InternalClient() *azcore.Client { + return client.internal +} + +func (client *BlobClient) DeleteCreateRequest(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + return client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) +} + +func (client *BlobClient) SetTierCreateRequest(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + return client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions) +} + +// NewBlobClient creates a new instance of BlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. 
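The directives above rewrite every generated date header to format through the gmt zone defined in blob_client.go. The reason is that Go's time.RFC1123 layout embeds the zone abbreviation, and HTTP conditional headers must read "GMT" rather than "UTC" or a local zone name. A small self-contained sketch of the difference (the sample timestamp is arbitrary):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// gmt mirrors the zone defined in blob_client.go: a fixed offset of 0 named "GMT".
	gmt := time.FixedZone("GMT", 0)

	t := time.Date(2024, time.November, 4, 15, 4, 5, 0, time.UTC)

	// RFC1123 prints the zone name, so formatting a UTC value yields "UTC",
	// which is not what headers like If-Modified-Since expect.
	fmt.Println(t.Format(time.RFC1123))         // Mon, 04 Nov 2024 15:04:05 UTC
	fmt.Println(t.In(gmt).Format(time.RFC1123)) // Mon, 04 Nov 2024 15:04:05 GMT
}
```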
+func NewBlobClient(endpoint string, azClient *azcore.Client) *BlobClient { + client := &BlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go new file mode 100644 index 0000000000..873d9a419f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go @@ -0,0 +1,32 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) + +func (client *BlockBlobClient) Endpoint() string { + return client.endpoint +} + +func (client *BlockBlobClient) Internal() *azcore.Client { + return client.internal +} + +// NewBlockBlobClient creates a new instance of BlockBlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. +func NewBlockBlobClient(endpoint string, azClient *azcore.Client) *BlockBlobClient { + client := &BlockBlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/build.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/build.go new file mode 100644 index 0000000000..57f112001b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/build.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +//go:generate autorest ./autorest.md +//go:generate gofmt -w . + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go new file mode 100644 index 0000000000..114ab2d576 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go @@ -0,0 +1,9 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +const ServiceVersion = "2024-11-04" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go new file mode 100644 index 0000000000..d43b2c7825 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) + +func (client *ContainerClient) Endpoint() string { + return client.endpoint +} + +func (client *ContainerClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewContainerClient creates a new instance of ContainerClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - pl - the pipeline used for sending requests and handling responses. +func NewContainerClient(endpoint string, azClient *azcore.Client) *ContainerClient { + client := &ContainerClient{ + internal: azClient, + endpoint: endpoint, + } + return client +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go new file mode 100644 index 0000000000..aaef9f53ba --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go @@ -0,0 +1,141 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +import ( + "encoding/xml" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "net/url" +) + +type TransactionalContentSetter interface { + SetCRC64([]byte) + SetMD5([]byte) +} + +func (a *AppendBlobClientAppendBlockOptions) SetCRC64(v []byte) { + a.TransactionalContentCRC64 = v +} + +func (a *AppendBlobClientAppendBlockOptions) SetMD5(v []byte) { + a.TransactionalContentMD5 = v +} + +func (b *BlockBlobClientStageBlockOptions) SetCRC64(v []byte) { + b.TransactionalContentCRC64 = v +} + +func (b *BlockBlobClientStageBlockOptions) SetMD5(v []byte) { + b.TransactionalContentMD5 = v +} + +func (p *PageBlobClientUploadPagesOptions) SetCRC64(v []byte) { + p.TransactionalContentCRC64 = v +} + +func (p *PageBlobClientUploadPagesOptions) SetMD5(v []byte) { + p.TransactionalContentMD5 = v +} + +func (b *BlockBlobClientUploadOptions) SetCRC64(v []byte) { + b.TransactionalContentCRC64 = v +} + +func (b *BlockBlobClientUploadOptions) SetMD5(v []byte) { + b.TransactionalContentMD5 = v +} + +type SourceContentSetter interface { + SetSourceContentCRC64(v []byte) + SetSourceContentMD5(v []byte) +} + +func (a *AppendBlobClientAppendBlockFromURLOptions) SetSourceContentCRC64(v []byte) { + a.SourceContentcrc64 = v +} + +func (a *AppendBlobClientAppendBlockFromURLOptions) SetSourceContentMD5(v []byte) { + a.SourceContentMD5 = v +} + +func (b *BlockBlobClientStageBlockFromURLOptions) SetSourceContentCRC64(v []byte) { + b.SourceContentcrc64 = v +} + +func (b *BlockBlobClientStageBlockFromURLOptions) SetSourceContentMD5(v []byte) { + b.SourceContentMD5 = v +} + +func (p *PageBlobClientUploadPagesFromURLOptions) SetSourceContentCRC64(v []byte) { + p.SourceContentcrc64 = v +} + +func (p *PageBlobClientUploadPagesFromURLOptions) SetSourceContentMD5(v []byte) { + p.SourceContentMD5 = v +} + +// Custom UnmarshalXML functions for types that need special handling. + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobPrefix. 
+func (b *BlobPrefix) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias BlobPrefix + aux := &struct { + *alias + BlobName *BlobName `xml:"Name"` + }{ + alias: (*alias)(b), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + if aux.BlobName != nil { + if aux.BlobName.Encoded != nil && *aux.BlobName.Encoded { + name, err := url.QueryUnescape(*aux.BlobName.Content) + + // name, err := base64.StdEncoding.DecodeString(*aux.BlobName.Content) + if err != nil { + return err + } + b.Name = to.Ptr(string(name)) + } else { + b.Name = aux.BlobName.Content + } + } + return nil +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItem. +func (b *BlobItem) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias BlobItem + aux := &struct { + *alias + BlobName *BlobName `xml:"Name"` + Metadata additionalProperties `xml:"Metadata"` + OrMetadata additionalProperties `xml:"OrMetadata"` + }{ + alias: (*alias)(b), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + b.Metadata = (map[string]*string)(aux.Metadata) + b.OrMetadata = (map[string]*string)(aux.OrMetadata) + if aux.BlobName != nil { + if aux.BlobName.Encoded != nil && *aux.BlobName.Encoded { + name, err := url.QueryUnescape(*aux.BlobName.Content) + + // name, err := base64.StdEncoding.DecodeString(*aux.BlobName.Content) + if err != nil { + return err + } + b.Name = to.Ptr(string(name)) + } else { + b.Name = aux.BlobName.Content + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go new file mode 100644 index 0000000000..a7c76208aa --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) + +func (client *PageBlobClient) Endpoint() string { + return client.endpoint +} + +func (client *PageBlobClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewPageBlobClient creates a new instance of PageBlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. +func NewPageBlobClient(endpoint string, azClient *azcore.Client) *PageBlobClient { + client := &PageBlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go new file mode 100644 index 0000000000..32c15a2b09 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
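The custom UnmarshalXML methods above handle listings in which the service marks a blob name as Encoded="true" and URL-escapes characters that are not valid in XML. The sketch below shows the same decode-then-unescape step in isolation, using stand-in types (name and prefix are illustrative, not the generated BlobName/BlobPrefix):

```go
package main

import (
	"encoding/xml"
	"fmt"
	"net/url"
)

// name mirrors the generated BlobName shape: an Encoded attribute plus character data.
type name struct {
	Encoded *bool   `xml:"Encoded,attr"`
	Content *string `xml:",chardata"`
}

type prefix struct {
	Name name `xml:"Name"`
}

func main() {
	// The service URL-encodes names containing characters that XML cannot carry directly.
	data := []byte(`<BlobPrefix><Name Encoded="true">folder%2Fsub%2F</Name></BlobPrefix>`)

	var p prefix
	if err := xml.Unmarshal(data, &p); err != nil {
		panic(err)
	}

	value := *p.Name.Content
	if p.Name.Encoded != nil && *p.Name.Encoded {
		decoded, err := url.QueryUnescape(value)
		if err != nil {
			panic(err)
		}
		value = decoded
	}
	fmt.Println(value) // folder/sub/
}
```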
+ +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) + +func (client *ServiceClient) Endpoint() string { + return client.endpoint +} + +func (client *ServiceClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewServiceClient creates a new instance of ServiceClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. +func NewServiceClient(endpoint string, azClient *azcore.Client) *ServiceClient { + client := &ServiceClient{ + internal: azClient, + endpoint: endpoint, + } + return client +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go new file mode 100644 index 0000000000..80c5d99d39 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go @@ -0,0 +1,659 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +// AppendBlobClient contains the methods for the AppendBlob group. +// Don't use this type directly, use a constructor function instead. +type AppendBlobClient struct { + internal *azcore.Client + endpoint string +} + +// AppendBlock - The Append Block operation commits a new block of data to the end of an existing append blob. The Append +// Block operation is permitted only if the blob was created with x-ms-blob-type set to +// AppendBlob. Append Block is supported only on version 2015-02-21 version or later. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - contentLength - The length of the request. +// - body - Initial data +// - options - AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *AppendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientAppendBlockResponse, error) { + var err error + req, err := client.appendBlockCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientAppendBlockResponse{}, err + } + resp, err := client.appendBlockHandleResponse(httpResp) + return resp, err +} + +// appendBlockCreateRequest creates the AppendBlock request. +func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "appendblock") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { + req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = 
[]string{*options.RequestID} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil +} + +// appendBlockHandleResponse handles the AppendBlock response. +func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockResponse, error) { + result := AppendBlobClientAppendBlockResponse{} + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.LastModified = 
&lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// AppendBlockFromURL - The Append Block operation commits a new block of data to the end of an existing append blob where +// the contents are read from a source url. The Append Block operation is permitted only if the blob was +// created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - sourceURL - Specify a URL to the copy source. +// - contentLength - The length of the request. +// - options - AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL +// method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +func (client *AppendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (AppendBlobClientAppendBlockFromURLResponse, error) { + var err error + req, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + resp, err := client.appendBlockFromURLHandleResponse(httpResp) + return resp, err +} + +// appendBlockFromURLCreateRequest creates the AppendBlockFromURL request. 
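The request builders above carry transactional integrity values as base64-encoded headers: Content-MD5 for the MD5 digest and x-ms-content-crc64 for the CRC64 checksum. The sketch below only illustrates the encoding shape of those header values; the CRC polynomial and byte order are stand-ins, since the module keeps its own CRC64 table internally.

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"hash/crc64"
)

func main() {
	payload := []byte("hello, blob")

	// Content-MD5 header value: base64 of the raw 16-byte digest.
	sum := md5.Sum(payload)
	fmt.Println("Content-MD5:", base64.StdEncoding.EncodeToString(sum[:]))

	// x-ms-content-crc64 header value: base64 of an 8-byte checksum.
	// ECMA polynomial and little-endian order are used here purely for illustration.
	table := crc64.MakeTable(crc64.ECMA)
	crc := crc64.Checksum(payload, table)
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, crc)
	fmt.Println("x-ms-content-crc64:", base64.StdEncoding.EncodeToString(buf))
}
```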
+func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "appendblock") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { + req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if 
leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.SourceRange != nil { + req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// appendBlockFromURLHandleResponse handles the AppendBlockFromURL response. +func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockFromURLResponse, error) { + result := AppendBlobClientAppendBlockFromURLResponse{} + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + 
return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// Create - The Create Append Blob operation creates a new append blob. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - contentLength - The length of the request. +// - options - AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *AppendBlobClient) Create(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientCreateResponse, error) { + var err error + req, err := client.createCreateRequest(ctx, contentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientCreateResponse{}, err + } + resp, err := client.createHandleResponse(httpResp) + return resp, err +} + +// createCreateRequest creates the Create request. 
+func (client *AppendBlobClient) createCreateRequest(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + req.Raw().Header["x-ms-blob-type"] = []string{"AppendBlob"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + 
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// createHandleResponse handles the Create response. +func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (AppendBlobClientCreateResponse, error) { + result := AppendBlobClientCreateResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + return result, nil +} + +// Seal - The Seal operation seals the Append Blob to make it read-only. Seal is supported only on version 2019-12-12 version +// or later. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+// - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +func (client *AppendBlobClient) Seal(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (AppendBlobClientSealResponse, error) { + var err error + req, err := client.sealCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientSealResponse{}, err + } + resp, err := client.sealHandleResponse(httpResp) + return resp, err +} + +// sealCreateRequest creates the Seal request. +func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "seal") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// sealHandleResponse handles the Seal response. 
+func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendBlobClientSealResponse, error) { + result := AppendBlobClientSealResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { + isSealed, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.IsSealed = &isSealed + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go new file mode 100644 index 0000000000..9e99a4efab --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go @@ -0,0 +1,2972 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "net/http" + "strconv" + "strings" + "time" +) + +// BlobClient contains the methods for the Blob group. +// Don't use this type directly, use a constructor function instead. +type BlobClient struct { + internal *azcore.Client + endpoint string +} + +// AbortCopyFromURL - The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination +// blob with zero length and full metadata. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. +// - options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
+func (client *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (BlobClientAbortCopyFromURLResponse, error) { + var err error + req, err := client.abortCopyFromURLCreateRequest(ctx, copyID, options, leaseAccessConditions) + if err != nil { + return BlobClientAbortCopyFromURLResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientAbortCopyFromURLResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientAbortCopyFromURLResponse{}, err + } + resp, err := client.abortCopyFromURLHandleResponse(httpResp) + return resp, err +} + +// abortCopyFromURLCreateRequest creates the AbortCopyFromURL request. +func (client *BlobClient) abortCopyFromURLCreateRequest(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "copy") + reqQP.Set("copyid", copyID) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-copy-action"] = []string{"abort"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// abortCopyFromURLHandleResponse handles the AbortCopyFromURL response. +func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (BlobClientAbortCopyFromURLResponse, error) { + result := BlobClientAbortCopyFromURLResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientAbortCopyFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// AcquireLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite +// lease can be between 15 and 60 seconds. A lease duration cannot be changed using +// renew or change. +// - options - BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlobClient) AcquireLease(ctx context.Context, duration int32, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientAcquireLeaseResponse, error) { + var err error + req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlobClientAcquireLeaseResponse{}, err + } + resp, err := client.acquireLeaseHandleResponse(httpResp) + return resp, err +} + +// acquireLeaseCreateRequest creates the AcquireLease request. +func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duration int32, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// acquireLeaseHandleResponse handles the AcquireLease response. 
+func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobClientAcquireLeaseResponse, error) { + result := BlobClientAcquireLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// BreakLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientBreakLeaseResponse, error) { + var err error + req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientBreakLeaseResponse{}, err + } + resp, err := client.breakLeaseHandleResponse(httpResp) + return resp, err +} + +// breakLeaseCreateRequest creates the BreakLease request. 
+func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if options != nil && options.BreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// breakLeaseHandleResponse handles the BreakLease response. +func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobClientBreakLeaseResponse, error) { + result := BlobClientBreakLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-time"); val != "" { + leaseTime32, err := strconv.ParseInt(val, 10, 32) + leaseTime := int32(leaseTime32) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + result.LeaseTime = &leaseTime + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2024-08-04 +// - leaseID - Specifies the current lease ID on the resource. +// - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed +// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID +// string formats. +// - options - BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientChangeLeaseResponse, error) { + var err error + req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientChangeLeaseResponse{}, err + } + resp, err := client.changeLeaseHandleResponse(httpResp) + return resp, err +} + +// changeLeaseCreateRequest creates the ChangeLease request. +func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-lease-action"] = []string{"change"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// changeLeaseHandleResponse handles the ChangeLease response. 
+func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobClientChangeLeaseResponse, error) { + result := BlobClientChangeLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// CopyFromURL - The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response +// until the copy is complete. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// - options - BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions, cpkScopeInfo *CPKScopeInfo) (BlobClientCopyFromURLResponse, error) { + var err error + req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions, cpkScopeInfo) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientCopyFromURLResponse{}, err + } + resp, err := client.copyFromURLHandleResponse(httpResp) + return resp, err +} + +// copyFromURLCreateRequest creates the CopyFromURL request. 
+func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions, cpkScopeInfo *CPKScopeInfo) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + if options != nil && options.CopySourceTags != nil { + req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + req.Raw().Header["x-ms-requires-sync"] = []string{"true"} + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + 
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// copyFromURLHandleResponse handles the CopyFromURL response. +func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobClientCopyFromURLResponse, error) { + result := BlobClientCopyFromURLResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + return result, nil +} + +// CreateSnapshot - The Create Snapshot operation creates a read-only snapshot of a blob +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
+// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) CreateSnapshot(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCreateSnapshotResponse, error) { + var err error + req, err := client.createSnapshotCreateRequest(ctx, options, cpkInfo, cpkScopeInfo, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlobClientCreateSnapshotResponse{}, err + } + resp, err := client.createSnapshotHandleResponse(httpResp) + return resp, err +} + +// createSnapshotCreateRequest creates the CreateSnapshot request. +func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "snapshot") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = 
[]string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// createSnapshotHandleResponse handles the CreateSnapshot response. +func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (BlobClientCreateSnapshotResponse, error) { + result := BlobClientCreateSnapshotResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-snapshot"); val != "" { + result.Snapshot = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + return result, nil +} + +// Delete - If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed +// from the storage account. If the storage account's soft delete feature is enabled, +// then, when a blob is deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service +// retains the blob or snapshot for the number of days specified by the +// DeleteRetentionPolicy section of Storage service properties [Set-Blob-Service-Properties.md]. After the specified number +// of days has passed, the blob's data is permanently removed from the storage +// account. Note that you continue to be charged for the soft-deleted blob's storage until it is permanently removed. Use +// the List Blobs API and specify the "include=deleted" query parameter to discover +// which blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. +// All other operations on a soft-deleted blob or snapshot causes the service to +// return an HTTP status code of 404 (ResourceNotFound). +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlobClient) Delete(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDeleteResponse, error) { + var err error + req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return BlobClientDeleteResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientDeleteResponse{}, err + } + resp, err := client.deleteHandleResponse(httpResp) + return resp, err +} + +// deleteCreateRequest creates the Delete request. +func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.DeleteType != nil { + reqQP.Set("deletetype", string(*options.DeleteType)) + } + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.DeleteSnapshots != nil { + req.Raw().Header["x-ms-delete-snapshots"] = []string{string(*options.DeleteSnapshots)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// deleteHandleResponse handles the Delete response. 
+func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientDeleteResponse, error) { + result := BlobClientDeleteResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDeleteResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// DeleteImmutabilityPolicy - The Delete Immutability Policy operation deletes the immutability policy on the blob +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy +// method. +func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (BlobClientDeleteImmutabilityPolicyResponse, error) { + var err error + req, err := client.deleteImmutabilityPolicyCreateRequest(ctx, options) + if err != nil { + return BlobClientDeleteImmutabilityPolicyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientDeleteImmutabilityPolicyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientDeleteImmutabilityPolicyResponse{}, err + } + resp, err := client.deleteImmutabilityPolicyHandleResponse(httpResp) + return resp, err +} + +// deleteImmutabilityPolicyCreateRequest creates the DeleteImmutabilityPolicy request. +func (client *BlobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "immutabilityPolicies") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// deleteImmutabilityPolicyHandleResponse handles the DeleteImmutabilityPolicy response. +func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Response) (BlobClientDeleteImmutabilityPolicyResponse, error) { + result := BlobClientDeleteImmutabilityPolicyResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDeleteImmutabilityPolicyResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// Download - The Download operation reads or downloads a blob from the system, including its metadata and properties. 
You +// can also call Download to read a snapshot. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) Download(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDownloadResponse, error) { + var err error + req, err := client.downloadCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + if err != nil { + return BlobClientDownloadResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientDownloadResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) { + err = runtime.NewResponseError(httpResp) + return BlobClientDownloadResponse{}, err + } + resp, err := client.downloadHandleResponse(httpResp) + return resp, err +} + +// downloadCreateRequest creates the Download request. +func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = 
[]string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if options != nil && options.RangeGetContentCRC64 != nil { + req.Raw().Header["x-ms-range-get-content-crc64"] = []string{strconv.FormatBool(*options.RangeGetContentCRC64)} + } + if options != nil && options.RangeGetContentMD5 != nil { + req.Raw().Header["x-ms-range-get-content-md5"] = []string{strconv.FormatBool(*options.RangeGetContentMD5)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// downloadHandleResponse handles the Download response. +func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClientDownloadResponse, error) { + result := BlobClientDownloadResponse{Body: resp.Body} + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { + blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.BlobContentMD5 = blobContentMD5 + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := 
resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.CopyCompletionTime = ©CompletionTime + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val + } + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("x-ms-creation-time"); val != "" { + creationTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.CreationTime = &creationTime + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-error-code"); val != "" { + result.ErrorCode = &val + } + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + } + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + } + if val := resp.Header.Get("x-ms-is-current-version"); val != "" { + isCurrentVersion, err := strconv.ParseBool(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.IsCurrentVersion = &isCurrentVersion + } + if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { + isSealed, err := strconv.ParseBool(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.IsSealed = &isSealed + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-last-access-time"); val != "" { + lastAccessed, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.LastAccessed = &lastAccessed + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + if val := resp.Header.Get("x-ms-legal-hold"); 
val != "" { + legalHold, err := strconv.ParseBool(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.LegalHold = &legalHold + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.ObjectReplicationRules == nil { + result.ObjectReplicationRules = map[string]*string{} + } + result.ObjectReplicationRules[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.TagCount = &tagCount + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + return result, nil +} + +// GetAccountInfo - Returns the sku name and account kind +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. +func (client *BlobClient) GetAccountInfo(ctx context.Context, options *BlobClientGetAccountInfoOptions) (BlobClientGetAccountInfoResponse, error) { + var err error + req, err := client.getAccountInfoCreateRequest(ctx, options) + if err != nil { + return BlobClientGetAccountInfoResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientGetAccountInfoResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientGetAccountInfoResponse{}, err + } + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err +} + +// getAccountInfoCreateRequest creates the GetAccountInfo request. +func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, options *BlobClientGetAccountInfoOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + reqQP.Set("restype", "account") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// getAccountInfoHandleResponse handles the GetAccountInfo response. 
+func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (BlobClientGetAccountInfoResponse, error) { + result := BlobClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetAccountInfoResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" { + isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetAccountInfoResponse{}, err + } + result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// GetProperties - The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties +// for the blob. It does not return the content of the blob. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) GetProperties(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientGetPropertiesResponse, error) { + var err error + req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientGetPropertiesResponse{}, err + } + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err +} + +// getPropertiesCreateRequest creates the GetProperties request. 
+func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodHead, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. 
+func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (BlobClientGetPropertiesResponse, error) { + result := BlobClientGetPropertiesResponse{} + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-access-tier"); val != "" { + result.AccessTier = &val + } + if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" { + accessTierChangeTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.AccessTierChangeTime = &accessTierChangeTime + } + if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" { + accessTierInferred, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.AccessTierInferred = &accessTierInferred + } + if val := resp.Header.Get("x-ms-archive-status"); val != "" { + result.ArchiveStatus = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.CopyCompletionTime = &copyCompletionTime + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val + } + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("x-ms-creation-time"); val != "" { + creationTime, err := time.Parse(time.RFC1123, val) + if err != nil { 
+ return BlobClientGetPropertiesResponse{}, err + } + result.CreationTime = &creationTime + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-destination-snapshot"); val != "" { + result.DestinationSnapshot = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-expiry-time"); val != "" { + expiresOn, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.ExpiresOn = &expiresOn + } + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + } + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + } + if val := resp.Header.Get("x-ms-is-current-version"); val != "" { + isCurrentVersion, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.IsCurrentVersion = &isCurrentVersion + } + if val := resp.Header.Get("x-ms-incremental-copy"); val != "" { + isIncrementalCopy, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.IsIncrementalCopy = &isIncrementalCopy + } + if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { + isSealed, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.IsSealed = &isSealed + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-last-access-time"); val != "" { + lastAccessed, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.LastAccessed = &lastAccessed + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + if val := resp.Header.Get("x-ms-legal-hold"); val != "" { + legalHold, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.LegalHold = &legalHold + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = 
to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.ObjectReplicationRules == nil { + result.ObjectReplicationRules = map[string]*string{} + } + result.ObjectReplicationRules[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-rehydrate-priority"); val != "" { + result.RehydratePriority = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.TagCount = &tagCount + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + return result, nil +} + +// GetTags - The Get Tags operation enables users to get the tags associated with a blob. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) GetTags(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientGetTagsResponse, error) { + var err error + req, err := client.getTagsCreateRequest(ctx, options, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return BlobClientGetTagsResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientGetTagsResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientGetTagsResponse{}, err + } + resp, err := client.getTagsHandleResponse(httpResp) + return resp, err +} + +// getTagsCreateRequest creates the GetTags request. 
+func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "tags") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// getTagsHandleResponse handles the GetTags response. +func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClientGetTagsResponse, error) { + result := BlobClientGetTagsResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetTagsResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.BlobTags); err != nil { + return BlobClientGetTagsResponse{}, err + } + return result, nil +} + +// Query - The Query operation enables users to select/project on blob data by providing simple query expressions. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
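+// Illustrative usage (editorial sketch, not generated code): calling Query with a
+// prepared query request; the QueryRequest type and the variable qr are assumed to
+// be defined elsewhere in this package and in the caller, respectively.
+//
+//	resp, err := client.Query(ctx, &BlobClientQueryOptions{QueryRequest: &qr}, nil, nil, nil)
+//	if err != nil {
+//		// handle *azcore.ResponseError
+//	}
+//	defer resp.Body.Close() // Body streams the selected/projected blob data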
+func (client *BlobClient) Query(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientQueryResponse, error) { + var err error + req, err := client.queryCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + if err != nil { + return BlobClientQueryResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientQueryResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusPartialContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientQueryResponse{}, err + } + resp, err := client.queryHandleResponse(httpResp) + return resp, err +} + +// queryCreateRequest creates the Query request. +func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "query") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.QueryRequest != nil { + if err := runtime.MarshalAsXML(req, *options.QueryRequest); err != nil { + return nil, err + } + return req, nil + } + return 
req, nil +} + +// queryHandleResponse handles the Query response. +func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQueryResponse, error) { + result := BlobClientQueryResponse{Body: resp.Body} + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { + blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.BlobContentMD5 = blobContentMD5 + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.CopyCompletionTime = &copyCompletionTime + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val + } + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = 
(*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// ReleaseLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - leaseID - Specifies the current lease ID on the resource. +// - options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) ReleaseLease(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientReleaseLeaseResponse, error) { + var err error + req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) + if err != nil { + return BlobClientReleaseLeaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientReleaseLeaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientReleaseLeaseResponse{}, err + } + resp, err := client.releaseLeaseHandleResponse(httpResp) + return resp, err +} + +// releaseLeaseCreateRequest creates the ReleaseLease request. 
+func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-lease-action"] = []string{"release"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// releaseLeaseHandleResponse handles the ReleaseLease response. +func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobClientReleaseLeaseResponse, error) { + result := BlobClientReleaseLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientReleaseLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientReleaseLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - leaseID - Specifies the current lease ID on the resource. +// - options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
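+// Illustrative usage (editorial sketch, not generated code): renewing an existing
+// lease; leaseID is assumed to be the ID returned when the lease was acquired.
+//
+//	resp, err := client.RenewLease(ctx, leaseID, nil, nil)
+//	if err != nil {
+//		// handle *azcore.ResponseError
+//	}
+//	_ = resp.LeaseID // lease ID echoed back by the service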
+func (client *BlobClient) RenewLease(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientRenewLeaseResponse, error) { + var err error + req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) + if err != nil { + return BlobClientRenewLeaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientRenewLeaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientRenewLeaseResponse{}, err + } + resp, err := client.renewLeaseHandleResponse(httpResp) + return resp, err +} + +// renewLeaseCreateRequest creates the RenewLease request. +func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-lease-action"] = []string{"renew"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// renewLeaseHandleResponse handles the RenewLease response. 
+func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobClientRenewLeaseResponse, error) { + result := BlobClientRenewLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientRenewLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientRenewLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SetExpiry - Sets the time a blob will expire and be deleted. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - expiryOptions - Required. Indicates mode of the expiry time +// - options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. +func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (BlobClientSetExpiryResponse, error) { + var err error + req, err := client.setExpiryCreateRequest(ctx, expiryOptions, options) + if err != nil { + return BlobClientSetExpiryResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientSetExpiryResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetExpiryResponse{}, err + } + resp, err := client.setExpiryHandleResponse(httpResp) + return resp, err +} + +// setExpiryCreateRequest creates the SetExpiry request. +func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "expiry") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-expiry-option"] = []string{string(expiryOptions)} + if options != nil && options.ExpiresOn != nil { + req.Raw().Header["x-ms-expiry-time"] = []string{*options.ExpiresOn} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// setExpiryHandleResponse handles the SetExpiry response. 
+func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClientSetExpiryResponse, error) { + result := BlobClientSetExpiryResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetExpiryResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetExpiryResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) SetHTTPHeaders(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetHTTPHeadersResponse, error) { + var err error + req, err := client.setHTTPHeadersCreateRequest(ctx, options, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return BlobClientSetHTTPHeadersResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientSetHTTPHeadersResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetHTTPHeadersResponse{}, err + } + resp, err := client.setHTTPHeadersHandleResponse(httpResp) + return resp, err +} + +// setHTTPHeadersCreateRequest creates the SetHTTPHeaders request. 
+func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// setHTTPHeadersHandleResponse handles the SetHTTPHeaders response. 
+func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (BlobClientSetHTTPHeadersResponse, error) { + result := BlobClientSetHTTPHeadersResponse{} + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientSetHTTPHeadersResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetHTTPHeadersResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetHTTPHeadersResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SetImmutabilityPolicy - The Set Immutability Policy operation sets the immutability policy on the blob +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) SetImmutabilityPolicy(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetImmutabilityPolicyResponse, error) { + var err error + req, err := client.setImmutabilityPolicyCreateRequest(ctx, options, modifiedAccessConditions) + if err != nil { + return BlobClientSetImmutabilityPolicyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientSetImmutabilityPolicyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetImmutabilityPolicyResponse{}, err + } + resp, err := client.setImmutabilityPolicyHandleResponse(httpResp) + return resp, err +} + +// setImmutabilityPolicyCreateRequest creates the SetImmutabilityPolicy request. 
+func (client *BlobClient) setImmutabilityPolicyCreateRequest(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "immutabilityPolicies") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// setImmutabilityPolicyHandleResponse handles the SetImmutabilityPolicy response. +func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Response) (BlobClientSetImmutabilityPolicyResponse, error) { + result := BlobClientSetImmutabilityPolicyResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetImmutabilityPolicyResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiry, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetImmutabilityPolicyResponse{}, err + } + result.ImmutabilityPolicyExpiry = &immutabilityPolicyExpiry + } + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SetLegalHold - The Set Legal Hold operation sets a legal hold on the blob. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - legalHold - Specified if a legal hold should be set on the blob. +// - options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. 
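+// Illustrative usage (editorial sketch, not generated code): placing a legal hold
+// on the blob and reading the confirmation returned in the response headers.
+//
+//	resp, err := client.SetLegalHold(ctx, true, nil)
+//	if err != nil {
+//		// handle *azcore.ResponseError
+//	}
+//	if resp.LegalHold != nil && *resp.LegalHold {
+//		// legal hold is now in effect
+//	}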
+func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (BlobClientSetLegalHoldResponse, error) { + var err error + req, err := client.setLegalHoldCreateRequest(ctx, legalHold, options) + if err != nil { + return BlobClientSetLegalHoldResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientSetLegalHoldResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetLegalHoldResponse{}, err + } + resp, err := client.setLegalHoldHandleResponse(httpResp) + return resp, err +} + +// setLegalHoldCreateRequest creates the SetLegalHold request. +func (client *BlobClient) setLegalHoldCreateRequest(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "legalhold") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(legalHold)} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// setLegalHoldHandleResponse handles the SetLegalHold response. +func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobClientSetLegalHoldResponse, error) { + result := BlobClientSetLegalHoldResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetLegalHoldResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-legal-hold"); val != "" { + legalHold, err := strconv.ParseBool(val) + if err != nil { + return BlobClientSetLegalHoldResponse{}, err + } + result.LegalHold = &legalHold + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SetMetadata - The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more name-value +// pairs +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
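+// Illustrative usage (editorial sketch, not generated code): replacing the blob's
+// user-defined metadata; to.Ptr is the pointer helper already used in this file.
+//
+//	md := map[string]*string{"project": to.Ptr("demo")}
+//	resp, err := client.SetMetadata(ctx, &BlobClientSetMetadataOptions{Metadata: md}, nil, nil, nil, nil)
+//	if err != nil {
+//		// handle *azcore.ResponseError
+//	}
+//	_ = resp.ETag // ETag of the updated blob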
+func (client *BlobClient) SetMetadata(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetMetadataResponse, error) { + var err error + req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return BlobClientSetMetadataResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientSetMetadataResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetMetadataResponse{}, err + } + resp, err := client.setMetadataHandleResponse(httpResp) + return resp, err +} + +// setMetadataCreateRequest creates the SetMetadata request. +func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "metadata") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != 
nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// setMetadataHandleResponse handles the SetMetadata response. +func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobClientSetMetadataResponse, error) { + result := BlobClientSetMetadataResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetMetadataResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientSetMetadataResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetMetadataResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + return result, nil +} + +// SetTags - The Set Tags operation enables users to set tags on a blob. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - tags - Blob tags +// - options - BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) SetTags(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientSetTagsResponse, error) { + var err error + req, err := client.setTagsCreateRequest(ctx, tags, options, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return BlobClientSetTagsResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientSetTagsResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetTagsResponse{}, err + } + resp, err := client.setTagsHandleResponse(httpResp) + return resp, err +} + +// setTagsCreateRequest creates the SetTags request. 
+func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "tags") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if err := runtime.MarshalAsXML(req, tags); err != nil { + return nil, err + } + return req, nil +} + +// setTagsHandleResponse handles the SetTags response. +func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClientSetTagsResponse, error) { + result := BlobClientSetTagsResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetTagsResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SetTier - The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage +// account and on a block blob in a blob storage account (locally redundant storage only). A +// premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive +// storage type. This operation does not update the blob's ETag. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - tier - Indicates the tier to be set on the blob. +// - options - BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
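Before the SetTier implementation that follows, a hedged usage sketch: assuming a *BlobClient has already been constructed elsewhere in this package (its constructors are outside this hunk), a call passes the tier plus nil for the unused optional parameter groups. AccessTier is a string-backed enum in the generated constants, so the conversion below is an illustrative stand-in for the corresponding generated constant:

// Sketch only: blobClient is an already-constructed *BlobClient from this
// generated package; error handling is abbreviated.
func setTierExample(ctx context.Context, blobClient *BlobClient) error {
	// "Cool" is one of the tier values accepted by the service's
	// x-ms-access-tier header.
	_, err := blobClient.SetTier(ctx, AccessTier("Cool"), nil, nil, nil)
	return err
}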
+func (client *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetTierResponse, error) { + var err error + req, err := client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return BlobClientSetTierResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientSetTierResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetTierResponse{}, err + } + resp, err := client.setTierHandleResponse(httpResp) + return resp, err +} + +// setTierCreateRequest creates the SetTier request. +func (client *BlobClient) setTierCreateRequest(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "tier") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-access-tier"] = []string{string(tier)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.RehydratePriority != nil { + req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// setTierHandleResponse handles the SetTier response. +func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClientSetTierResponse, error) { + result := BlobClientSetTierResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// StartCopyFromURL - The Start Copy From URL operation copies a blob or an internet resource to a new blob. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. 
+// - options - BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientStartCopyFromURLResponse, error) { + var err error + req, err := client.startCopyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return BlobClientStartCopyFromURLResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientStartCopyFromURLResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientStartCopyFromURLResponse{}, err + } + resp, err := client.startCopyFromURLHandleResponse(httpResp) + return resp, err +} + +// startCopyFromURLCreateRequest creates the StartCopyFromURL request. +func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if options != 
nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.RehydratePriority != nil { + req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} + } + if options != nil && options.SealBlob != nil { + req.Raw().Header["x-ms-seal-blob"] = []string{strconv.FormatBool(*options.SealBlob)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { + req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// startCopyFromURLHandleResponse handles the StartCopyFromURL response. 
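All of the conditional headers assembled above (If-Modified-Since, If-Unmodified-Since, and the x-ms-source-if-* variants) format their timestamps as (*t).In(gmt).Format(time.RFC1123), where gmt is a package-level *time.Location defined elsewhere in this vendored package (assumed equivalent to GMT/UTC+0). A standard-library-only sketch of the same formatting, with a local stand-in for that location:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Local stand-in for the package-level gmt location used by the
	// generated request builders.
	gmt := time.FixedZone("GMT", 0)

	modifiedSince := time.Date(2024, time.August, 4, 12, 30, 0, 0, time.UTC)

	// RFC 1123 with an explicit GMT zone name:
	// "Sun, 04 Aug 2024 12:30:00 GMT"
	fmt.Println(modifiedSince.In(gmt).Format(time.RFC1123))
}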
+func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (BlobClientStartCopyFromURLResponse, error) { + result := BlobClientStartCopyFromURLResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientStartCopyFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientStartCopyFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + return result, nil +} + +// Undelete - Undelete a blob that was previously soft deleted +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. +func (client *BlobClient) Undelete(ctx context.Context, options *BlobClientUndeleteOptions) (BlobClientUndeleteResponse, error) { + var err error + req, err := client.undeleteCreateRequest(ctx, options) + if err != nil { + return BlobClientUndeleteResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientUndeleteResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientUndeleteResponse{}, err + } + resp, err := client.undeleteHandleResponse(httpResp) + return resp, err +} + +// undeleteCreateRequest creates the Undelete request. +func (client *BlobClient) undeleteCreateRequest(ctx context.Context, options *BlobClientUndeleteOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "undelete") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// undeleteHandleResponse handles the Undelete response. 
+func (client *BlobClient) undeleteHandleResponse(resp *http.Response) (BlobClientUndeleteResponse, error) { + result := BlobClientUndeleteResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientUndeleteResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go new file mode 100644 index 0000000000..6768a1143f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go @@ -0,0 +1,997 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +// BlockBlobClient contains the methods for the BlockBlob group. +// Don't use this type directly, use a constructor function instead. +type BlockBlobClient struct { + internal *azcore.Client + endpoint string +} + +// CommitBlockList - The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. +// In order to be written as part of a blob, a block must have been successfully written to the +// server in a prior Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that +// have changed, then committing the new and existing blocks together. You can do +// this by specifying whether to commit a block from the committed block list or from the uncommitted block list, or to commit +// the most recently uploaded version of the block, whichever list it may +// belong to. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - blocks - Blob Blocks. +// - options - BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList +// method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientCommitBlockListResponse, error) { + var err error + req, err := client.commitBlockListCreateRequest(ctx, blocks, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientCommitBlockListResponse{}, err + } + resp, err := client.commitBlockListHandleResponse(httpResp) + return resp, err +} + +// commitBlockListCreateRequest creates the CommitBlockList request. +func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "blocklist") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + 
req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if err := runtime.MarshalAsXML(req, blocks); err != nil { + return nil, err + } + return req, nil +} + +// commitBlockListHandleResponse handles the CommitBlockList response. 
+func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response) (BlockBlobClientCommitBlockListResponse, error) { + result := BlockBlobClientCommitBlockListResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + return result, nil +} + +// GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. +// - options - BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientGetBlockListResponse, error) { + var err error + req, err := client.getBlockListCreateRequest(ctx, listType, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientGetBlockListResponse{}, err + } + resp, err := client.getBlockListHandleResponse(httpResp) + return resp, err +} + +// getBlockListCreateRequest creates the GetBlockList request. +func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("blocklisttype", string(listType)) + reqQP.Set("comp", "blocklist") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// getBlockListHandleResponse handles the GetBlockList response. 
+func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (BlockBlobClientGetBlockListResponse, error) { + result := BlockBlobClientGetBlockListResponse{} + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + result.BlobContentLength = &blobContentLength + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.BlockList); err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + return result, nil +} + +// PutBlobFromURL - The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from +// a given URL. This API is supported beginning with the 2020-04-08 version. Partial updates are not +// supported with Put Blob from URL; the content of an existing blob is overwritten with the content of the new blob. To perform +// partial updates to a block blob’s contents using a source URL, use the Put +// Block from URL API in conjunction with Put Block List. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - contentLength - The length of the request. +// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// - options - BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL +// method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. 
+func (client *BlockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientPutBlobFromURLResponse, error) { + var err error + req, err := client.putBlobFromURLCreateRequest(ctx, contentLength, copySource, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientPutBlobFromURLResponse{}, err + } + resp, err := client.putBlobFromURLHandleResponse(httpResp) + return resp, err +} + +// putBlobFromURLCreateRequest creates the PutBlobFromURL request. +func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if 
blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + if options != nil && options.CopySourceBlobProperties != nil { + req.Raw().Header["x-ms-copy-source-blob-properties"] = []string{strconv.FormatBool(*options.CopySourceBlobProperties)} + } + if options != nil && options.CopySourceTags != nil { + req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { + 
req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// putBlobFromURLHandleResponse handles the PutBlobFromURL response. +func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) (BlockBlobClientPutBlobFromURLResponse, error) { + result := BlockBlobClientPutBlobFromURLResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + return result, nil +} + +// StageBlock - The Stage Block operation creates a new block to be committed as part of a blob +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal +// to 64 bytes in size. For a given blob, the length of the value specified for the blockid +// parameter must be the same size for each block. +// - contentLength - The length of the request. +// - body - Initial data +// - options - BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
+func (client *BlockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo) (BlockBlobClientStageBlockResponse, error) { + var err error + req, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientStageBlockResponse{}, err + } + resp, err := client.stageBlockHandleResponse(httpResp) + return resp, err +} + +// stageBlockCreateRequest creates the StageBlock request. +func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("blockid", blockID) + reqQP.Set("comp", "block") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil +} + +// stageBlockHandleResponse handles the StageBlock response. 
+func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (BlockBlobClientStageBlockResponse, error) { + result := BlockBlobClientStageBlockResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// StageBlockFromURL - The Stage Block operation creates a new block to be committed as part of a blob where the contents +// are read from a URL. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal +// to 64 bytes in size. For a given blob, the length of the value specified for the blockid +// parameter must be the same size for each block. +// - contentLength - The length of the request. +// - sourceURL - Specify a URL to the copy source. +// - options - BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL +// method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. 
+func (client *BlockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientStageBlockFromURLResponse, error) { + var err error + req, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientStageBlockFromURLResponse{}, err + } + resp, err := client.stageBlockFromURLHandleResponse(httpResp) + return resp, err +} + +// stageBlockFromURLCreateRequest creates the StageBlockFromURL request. +func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("blockid", blockID) + reqQP.Set("comp", "block") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if sourceModifiedAccessConditions != nil && 
sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.SourceRange != nil { + req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// stageBlockFromURLHandleResponse handles the StageBlockFromURL response. +func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Response) (BlockBlobClientStageBlockFromURLResponse, error) { + result := BlockBlobClientStageBlockFromURLResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// Upload - The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob +// overwrites any existing metadata on the blob. Partial updates are not supported with Put +// Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a partial update of +// the content of a block blob, use the Put Block List operation. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - contentLength - The length of the request. 
+// - body - Initial data +// - options - BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientUploadResponse, error) { + var err error + req, err := client.uploadCreateRequest(ctx, contentLength, body, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientUploadResponse{}, err + } + resp, err := client.uploadHandleResponse(httpResp) + return resp, err +} + +// uploadCreateRequest creates the Upload request. +func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && 
options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil +} + +// uploadHandleResponse handles the Upload response. 
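The StageBlockFromURL and Upload methods above follow the same three-step shape: build the request, run it through the pipeline, decode the response headers. As a rough illustration of how the StageBlockFromURL signature is meant to be driven, the sketch below assumes it sits alongside the generated package purely to show the call shape; the helper name and source URL are hypothetical, and real callers reach this code through the public blockblob wrapper. Block IDs are base64-encoded strings, and Put Block From URL sends no request body, so the content length argument is zero here.

// Illustrative sketch only; not part of the vendored generated sources.
package generated

import (
	"context"
	"encoding/base64"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

// stageFromURL stages a single block of the destination block blob by copying
// a byte range from a readable source URL (for example a SAS-authorized blob).
func stageFromURL(ctx context.Context, client *BlockBlobClient, sourceURL string) error {
	// Block IDs must be base64-encoded; within one blob they must all have the same length.
	blockID := base64.StdEncoding.EncodeToString([]byte("block-0000"))
	opts := &BlockBlobClientStageBlockFromURLOptions{
		// Copy only the first 4 MiB of the source (sent as x-ms-source-range).
		SourceRange: to.Ptr("bytes=0-4194303"),
	}
	// The request carries no body, so contentLength is 0; the trailing nils skip
	// the CPK, lease and source-condition header groups.
	_, err := client.StageBlockFromURL(ctx, blockID, 0, sourceURL, opts, nil, nil, nil, nil)
	return err
}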
+func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockBlobClientUploadResponse, error) { + result := BlockBlobClientUploadResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go new file mode 100644 index 0000000000..3dd77291ff --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go @@ -0,0 +1,746 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +type AccessTier string + +const ( + AccessTierArchive AccessTier = "Archive" + AccessTierCold AccessTier = "Cold" + AccessTierCool AccessTier = "Cool" + AccessTierHot AccessTier = "Hot" + AccessTierP10 AccessTier = "P10" + AccessTierP15 AccessTier = "P15" + AccessTierP20 AccessTier = "P20" + AccessTierP30 AccessTier = "P30" + AccessTierP4 AccessTier = "P4" + AccessTierP40 AccessTier = "P40" + AccessTierP50 AccessTier = "P50" + AccessTierP6 AccessTier = "P6" + AccessTierP60 AccessTier = "P60" + AccessTierP70 AccessTier = "P70" + AccessTierP80 AccessTier = "P80" + AccessTierPremium AccessTier = "Premium" +) + +// PossibleAccessTierValues returns the possible values for the AccessTier const type. 
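Upload maps options.Tier straight onto the x-ms-access-tier header, so the AccessTier constants introduced just above are what actually travel on the wire. A minimal sketch under the same in-package caveat as the previous one (helper name hypothetical) that uploads an in-memory payload to the Cool tier with one metadata key:

// Illustrative sketch only; not part of the vendored generated sources.
package generated

import (
	"bytes"
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

// uploadCoolBlob overwrites the target block blob with data, placing it in the
// Cool access tier and attaching a single metadata entry.
func uploadCoolBlob(ctx context.Context, client *BlockBlobClient, data []byte) error {
	opts := &BlockBlobClientUploadOptions{
		Tier:     to.Ptr(AccessTierCool),                          // x-ms-access-tier: Cool
		Metadata: map[string]*string{"origin": to.Ptr("example")}, // x-ms-meta-origin
	}
	body := streaming.NopCloser(bytes.NewReader(data)) // Upload expects an io.ReadSeekCloser
	_, err := client.Upload(ctx, int64(len(data)), body, opts, nil, nil, nil, nil, nil)
	return err
}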
+func PossibleAccessTierValues() []AccessTier { + return []AccessTier{ + AccessTierArchive, + AccessTierCold, + AccessTierCool, + AccessTierHot, + AccessTierP10, + AccessTierP15, + AccessTierP20, + AccessTierP30, + AccessTierP4, + AccessTierP40, + AccessTierP50, + AccessTierP6, + AccessTierP60, + AccessTierP70, + AccessTierP80, + AccessTierPremium, + } +} + +type AccountKind string + +const ( + AccountKindBlobStorage AccountKind = "BlobStorage" + AccountKindBlockBlobStorage AccountKind = "BlockBlobStorage" + AccountKindFileStorage AccountKind = "FileStorage" + AccountKindStorage AccountKind = "Storage" + AccountKindStorageV2 AccountKind = "StorageV2" +) + +// PossibleAccountKindValues returns the possible values for the AccountKind const type. +func PossibleAccountKindValues() []AccountKind { + return []AccountKind{ + AccountKindBlobStorage, + AccountKindBlockBlobStorage, + AccountKindFileStorage, + AccountKindStorage, + AccountKindStorageV2, + } +} + +type ArchiveStatus string + +const ( + ArchiveStatusRehydratePendingToCold ArchiveStatus = "rehydrate-pending-to-cold" + ArchiveStatusRehydratePendingToCool ArchiveStatus = "rehydrate-pending-to-cool" + ArchiveStatusRehydratePendingToHot ArchiveStatus = "rehydrate-pending-to-hot" +) + +// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. +func PossibleArchiveStatusValues() []ArchiveStatus { + return []ArchiveStatus{ + ArchiveStatusRehydratePendingToCold, + ArchiveStatusRehydratePendingToCool, + ArchiveStatusRehydratePendingToHot, + } +} + +type BlobCopySourceTags string + +const ( + BlobCopySourceTagsCOPY BlobCopySourceTags = "COPY" + BlobCopySourceTagsREPLACE BlobCopySourceTags = "REPLACE" +) + +// PossibleBlobCopySourceTagsValues returns the possible values for the BlobCopySourceTags const type. +func PossibleBlobCopySourceTagsValues() []BlobCopySourceTags { + return []BlobCopySourceTags{ + BlobCopySourceTagsCOPY, + BlobCopySourceTagsREPLACE, + } +} + +// BlobGeoReplicationStatus - The status of the secondary location +type BlobGeoReplicationStatus string + +const ( + BlobGeoReplicationStatusBootstrap BlobGeoReplicationStatus = "bootstrap" + BlobGeoReplicationStatusLive BlobGeoReplicationStatus = "live" + BlobGeoReplicationStatusUnavailable BlobGeoReplicationStatus = "unavailable" +) + +// PossibleBlobGeoReplicationStatusValues returns the possible values for the BlobGeoReplicationStatus const type. +func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { + return []BlobGeoReplicationStatus{ + BlobGeoReplicationStatusBootstrap, + BlobGeoReplicationStatusLive, + BlobGeoReplicationStatusUnavailable, + } +} + +type BlobType string + +const ( + BlobTypeAppendBlob BlobType = "AppendBlob" + BlobTypeBlockBlob BlobType = "BlockBlob" + BlobTypePageBlob BlobType = "PageBlob" +) + +// PossibleBlobTypeValues returns the possible values for the BlobType const type. +func PossibleBlobTypeValues() []BlobType { + return []BlobType{ + BlobTypeAppendBlob, + BlobTypeBlockBlob, + BlobTypePageBlob, + } +} + +type BlockListType string + +const ( + BlockListTypeAll BlockListType = "all" + BlockListTypeCommitted BlockListType = "committed" + BlockListTypeUncommitted BlockListType = "uncommitted" +) + +// PossibleBlockListTypeValues returns the possible values for the BlockListType const type. 
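Each const type in this file follows the same pattern: a string-typed enum plus a Possible*Values helper that enumerates the accepted spellings. A small hypothetical validator (sketch only) shows the intended use of those helpers, here for AccessTier:

// Illustrative sketch only; not part of the vendored generated sources.
package generated

import "fmt"

// parseAccessTier converts a user-supplied string into an AccessTier,
// rejecting anything outside PossibleAccessTierValues.
func parseAccessTier(s string) (AccessTier, error) {
	for _, t := range PossibleAccessTierValues() {
		if string(t) == s {
			return t, nil
		}
	}
	return "", fmt.Errorf("unknown access tier %q", s)
}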
+func PossibleBlockListTypeValues() []BlockListType { + return []BlockListType{ + BlockListTypeAll, + BlockListTypeCommitted, + BlockListTypeUncommitted, + } +} + +type CopyStatusType string + +const ( + CopyStatusTypeAborted CopyStatusType = "aborted" + CopyStatusTypeFailed CopyStatusType = "failed" + CopyStatusTypePending CopyStatusType = "pending" + CopyStatusTypeSuccess CopyStatusType = "success" +) + +// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return []CopyStatusType{ + CopyStatusTypeAborted, + CopyStatusTypeFailed, + CopyStatusTypePending, + CopyStatusTypeSuccess, + } +} + +type DeleteSnapshotsOptionType string + +const ( + DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = "include" + DeleteSnapshotsOptionTypeOnly DeleteSnapshotsOptionType = "only" +) + +// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type. +func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { + return []DeleteSnapshotsOptionType{ + DeleteSnapshotsOptionTypeInclude, + DeleteSnapshotsOptionTypeOnly, + } +} + +type DeleteType string + +const ( + DeleteTypeNone DeleteType = "None" + DeleteTypePermanent DeleteType = "Permanent" +) + +// PossibleDeleteTypeValues returns the possible values for the DeleteType const type. +func PossibleDeleteTypeValues() []DeleteType { + return []DeleteType{ + DeleteTypeNone, + DeleteTypePermanent, + } +} + +type EncryptionAlgorithmType string + +const ( + EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = "AES256" + EncryptionAlgorithmTypeNone EncryptionAlgorithmType = "None" +) + +// PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type. +func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { + return []EncryptionAlgorithmType{ + EncryptionAlgorithmTypeAES256, + EncryptionAlgorithmTypeNone, + } +} + +type ExpiryOptions string + +const ( + ExpiryOptionsAbsolute ExpiryOptions = "Absolute" + ExpiryOptionsNeverExpire ExpiryOptions = "NeverExpire" + ExpiryOptionsRelativeToCreation ExpiryOptions = "RelativeToCreation" + ExpiryOptionsRelativeToNow ExpiryOptions = "RelativeToNow" +) + +// PossibleExpiryOptionsValues returns the possible values for the ExpiryOptions const type. +func PossibleExpiryOptionsValues() []ExpiryOptions { + return []ExpiryOptions{ + ExpiryOptionsAbsolute, + ExpiryOptionsNeverExpire, + ExpiryOptionsRelativeToCreation, + ExpiryOptionsRelativeToNow, + } +} + +type FilterBlobsIncludeItem string + +const ( + FilterBlobsIncludeItemNone FilterBlobsIncludeItem = "none" + FilterBlobsIncludeItemVersions FilterBlobsIncludeItem = "versions" +) + +// PossibleFilterBlobsIncludeItemValues returns the possible values for the FilterBlobsIncludeItem const type. +func PossibleFilterBlobsIncludeItemValues() []FilterBlobsIncludeItem { + return []FilterBlobsIncludeItem{ + FilterBlobsIncludeItemNone, + FilterBlobsIncludeItemVersions, + } +} + +type ImmutabilityPolicyMode string + +const ( + ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = "Locked" + ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = "Mutable" + ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = "Unlocked" +) + +// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type. 
+func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode { + return []ImmutabilityPolicyMode{ + ImmutabilityPolicyModeLocked, + ImmutabilityPolicyModeMutable, + ImmutabilityPolicyModeUnlocked, + } +} + +type ImmutabilityPolicySetting string + +const ( + ImmutabilityPolicySettingLocked ImmutabilityPolicySetting = "Locked" + ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = "Unlocked" +) + +// PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type. +func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting { + return []ImmutabilityPolicySetting{ + ImmutabilityPolicySettingLocked, + ImmutabilityPolicySettingUnlocked, + } +} + +type LeaseDurationType string + +const ( + LeaseDurationTypeFixed LeaseDurationType = "fixed" + LeaseDurationTypeInfinite LeaseDurationType = "infinite" +) + +// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type. +func PossibleLeaseDurationTypeValues() []LeaseDurationType { + return []LeaseDurationType{ + LeaseDurationTypeFixed, + LeaseDurationTypeInfinite, + } +} + +type LeaseStateType string + +const ( + LeaseStateTypeAvailable LeaseStateType = "available" + LeaseStateTypeBreaking LeaseStateType = "breaking" + LeaseStateTypeBroken LeaseStateType = "broken" + LeaseStateTypeExpired LeaseStateType = "expired" + LeaseStateTypeLeased LeaseStateType = "leased" +) + +// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type. +func PossibleLeaseStateTypeValues() []LeaseStateType { + return []LeaseStateType{ + LeaseStateTypeAvailable, + LeaseStateTypeBreaking, + LeaseStateTypeBroken, + LeaseStateTypeExpired, + LeaseStateTypeLeased, + } +} + +type LeaseStatusType string + +const ( + LeaseStatusTypeLocked LeaseStatusType = "locked" + LeaseStatusTypeUnlocked LeaseStatusType = "unlocked" +) + +// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type. +func PossibleLeaseStatusTypeValues() []LeaseStatusType { + return []LeaseStatusType{ + LeaseStatusTypeLocked, + LeaseStatusTypeUnlocked, + } +} + +type ListBlobsIncludeItem string + +const ( + ListBlobsIncludeItemCopy ListBlobsIncludeItem = "copy" + ListBlobsIncludeItemDeleted ListBlobsIncludeItem = "deleted" + ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItem = "deletedwithversions" + ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItem = "immutabilitypolicy" + ListBlobsIncludeItemLegalhold ListBlobsIncludeItem = "legalhold" + ListBlobsIncludeItemMetadata ListBlobsIncludeItem = "metadata" + ListBlobsIncludeItemPermissions ListBlobsIncludeItem = "permissions" + ListBlobsIncludeItemSnapshots ListBlobsIncludeItem = "snapshots" + ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags" + ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItem = "uncommittedblobs" + ListBlobsIncludeItemVersions ListBlobsIncludeItem = "versions" +) + +// PossibleListBlobsIncludeItemValues returns the possible values for the ListBlobsIncludeItem const type. 
+func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem { + return []ListBlobsIncludeItem{ + ListBlobsIncludeItemCopy, + ListBlobsIncludeItemDeleted, + ListBlobsIncludeItemDeletedwithversions, + ListBlobsIncludeItemImmutabilitypolicy, + ListBlobsIncludeItemLegalhold, + ListBlobsIncludeItemMetadata, + ListBlobsIncludeItemPermissions, + ListBlobsIncludeItemSnapshots, + ListBlobsIncludeItemTags, + ListBlobsIncludeItemUncommittedblobs, + ListBlobsIncludeItemVersions, + } +} + +type ListContainersIncludeType string + +const ( + ListContainersIncludeTypeDeleted ListContainersIncludeType = "deleted" + ListContainersIncludeTypeMetadata ListContainersIncludeType = "metadata" + ListContainersIncludeTypeSystem ListContainersIncludeType = "system" +) + +// PossibleListContainersIncludeTypeValues returns the possible values for the ListContainersIncludeType const type. +func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { + return []ListContainersIncludeType{ + ListContainersIncludeTypeDeleted, + ListContainersIncludeTypeMetadata, + ListContainersIncludeTypeSystem, + } +} + +type PremiumPageBlobAccessTier string + +const ( + PremiumPageBlobAccessTierP10 PremiumPageBlobAccessTier = "P10" + PremiumPageBlobAccessTierP15 PremiumPageBlobAccessTier = "P15" + PremiumPageBlobAccessTierP20 PremiumPageBlobAccessTier = "P20" + PremiumPageBlobAccessTierP30 PremiumPageBlobAccessTier = "P30" + PremiumPageBlobAccessTierP4 PremiumPageBlobAccessTier = "P4" + PremiumPageBlobAccessTierP40 PremiumPageBlobAccessTier = "P40" + PremiumPageBlobAccessTierP50 PremiumPageBlobAccessTier = "P50" + PremiumPageBlobAccessTierP6 PremiumPageBlobAccessTier = "P6" + PremiumPageBlobAccessTierP60 PremiumPageBlobAccessTier = "P60" + PremiumPageBlobAccessTierP70 PremiumPageBlobAccessTier = "P70" + PremiumPageBlobAccessTierP80 PremiumPageBlobAccessTier = "P80" +) + +// PossiblePremiumPageBlobAccessTierValues returns the possible values for the PremiumPageBlobAccessTier const type. +func PossiblePremiumPageBlobAccessTierValues() []PremiumPageBlobAccessTier { + return []PremiumPageBlobAccessTier{ + PremiumPageBlobAccessTierP10, + PremiumPageBlobAccessTierP15, + PremiumPageBlobAccessTierP20, + PremiumPageBlobAccessTierP30, + PremiumPageBlobAccessTierP4, + PremiumPageBlobAccessTierP40, + PremiumPageBlobAccessTierP50, + PremiumPageBlobAccessTierP6, + PremiumPageBlobAccessTierP60, + PremiumPageBlobAccessTierP70, + PremiumPageBlobAccessTierP80, + } +} + +type PublicAccessType string + +const ( + PublicAccessTypeBlob PublicAccessType = "blob" + PublicAccessTypeContainer PublicAccessType = "container" +) + +// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type. +func PossiblePublicAccessTypeValues() []PublicAccessType { + return []PublicAccessType{ + PublicAccessTypeBlob, + PublicAccessTypeContainer, + } +} + +// QueryFormatType - The quick query format type. +type QueryFormatType string + +const ( + QueryFormatTypeArrow QueryFormatType = "arrow" + QueryFormatTypeDelimited QueryFormatType = "delimited" + QueryFormatTypeJSON QueryFormatType = "json" + QueryFormatTypeParquet QueryFormatType = "parquet" +) + +// PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type. 
+func PossibleQueryFormatTypeValues() []QueryFormatType { + return []QueryFormatType{ + QueryFormatTypeArrow, + QueryFormatTypeDelimited, + QueryFormatTypeJSON, + QueryFormatTypeParquet, + } +} + +// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate. +// Valid values are High and Standard. +type RehydratePriority string + +const ( + RehydratePriorityHigh RehydratePriority = "High" + RehydratePriorityStandard RehydratePriority = "Standard" +) + +// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type. +func PossibleRehydratePriorityValues() []RehydratePriority { + return []RehydratePriority{ + RehydratePriorityHigh, + RehydratePriorityStandard, + } +} + +type SKUName string + +const ( + SKUNamePremiumLRS SKUName = "Premium_LRS" + SKUNameStandardGRS SKUName = "Standard_GRS" + SKUNameStandardLRS SKUName = "Standard_LRS" + SKUNameStandardRAGRS SKUName = "Standard_RAGRS" + SKUNameStandardZRS SKUName = "Standard_ZRS" +) + +// PossibleSKUNameValues returns the possible values for the SKUName const type. +func PossibleSKUNameValues() []SKUName { + return []SKUName{ + SKUNamePremiumLRS, + SKUNameStandardGRS, + SKUNameStandardLRS, + SKUNameStandardRAGRS, + SKUNameStandardZRS, + } +} + +type SequenceNumberActionType string + +const ( + SequenceNumberActionTypeIncrement SequenceNumberActionType = "increment" + SequenceNumberActionTypeMax SequenceNumberActionType = "max" + SequenceNumberActionTypeUpdate SequenceNumberActionType = "update" +) + +// PossibleSequenceNumberActionTypeValues returns the possible values for the SequenceNumberActionType const type. +func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { + return []SequenceNumberActionType{ + SequenceNumberActionTypeIncrement, + SequenceNumberActionTypeMax, + SequenceNumberActionTypeUpdate, + } +} + +// StorageErrorCode - Error codes returned by the service +type StorageErrorCode string + +const ( + StorageErrorCodeAccountAlreadyExists StorageErrorCode = "AccountAlreadyExists" + StorageErrorCodeAccountBeingCreated StorageErrorCode = "AccountBeingCreated" + StorageErrorCodeAccountIsDisabled StorageErrorCode = "AccountIsDisabled" + StorageErrorCodeAppendPositionConditionNotMet StorageErrorCode = "AppendPositionConditionNotMet" + StorageErrorCodeAuthenticationFailed StorageErrorCode = "AuthenticationFailed" + StorageErrorCodeAuthorizationFailure StorageErrorCode = "AuthorizationFailure" + StorageErrorCodeAuthorizationPermissionMismatch StorageErrorCode = "AuthorizationPermissionMismatch" + StorageErrorCodeAuthorizationProtocolMismatch StorageErrorCode = "AuthorizationProtocolMismatch" + StorageErrorCodeAuthorizationResourceTypeMismatch StorageErrorCode = "AuthorizationResourceTypeMismatch" + StorageErrorCodeAuthorizationServiceMismatch StorageErrorCode = "AuthorizationServiceMismatch" + StorageErrorCodeAuthorizationSourceIPMismatch StorageErrorCode = "AuthorizationSourceIPMismatch" + StorageErrorCodeBlobAlreadyExists StorageErrorCode = "BlobAlreadyExists" + StorageErrorCodeBlobArchived StorageErrorCode = "BlobArchived" + StorageErrorCodeBlobBeingRehydrated StorageErrorCode = "BlobBeingRehydrated" + StorageErrorCodeBlobImmutableDueToPolicy StorageErrorCode = "BlobImmutableDueToPolicy" + StorageErrorCodeBlobNotArchived StorageErrorCode = "BlobNotArchived" + StorageErrorCodeBlobNotFound StorageErrorCode = "BlobNotFound" + StorageErrorCodeBlobOverwritten StorageErrorCode = "BlobOverwritten" + 
StorageErrorCodeBlobTierInadequateForContentLength StorageErrorCode = "BlobTierInadequateForContentLength" + StorageErrorCodeBlobUsesCustomerSpecifiedEncryption StorageErrorCode = "BlobUsesCustomerSpecifiedEncryption" + StorageErrorCodeBlockCountExceedsLimit StorageErrorCode = "BlockCountExceedsLimit" + StorageErrorCodeBlockListTooLong StorageErrorCode = "BlockListTooLong" + StorageErrorCodeCannotChangeToLowerTier StorageErrorCode = "CannotChangeToLowerTier" + StorageErrorCodeCannotVerifyCopySource StorageErrorCode = "CannotVerifyCopySource" + StorageErrorCodeConditionHeadersNotSupported StorageErrorCode = "ConditionHeadersNotSupported" + StorageErrorCodeConditionNotMet StorageErrorCode = "ConditionNotMet" + StorageErrorCodeContainerAlreadyExists StorageErrorCode = "ContainerAlreadyExists" + StorageErrorCodeContainerBeingDeleted StorageErrorCode = "ContainerBeingDeleted" + StorageErrorCodeContainerDisabled StorageErrorCode = "ContainerDisabled" + StorageErrorCodeContainerNotFound StorageErrorCode = "ContainerNotFound" + StorageErrorCodeContentLengthLargerThanTierLimit StorageErrorCode = "ContentLengthLargerThanTierLimit" + StorageErrorCodeCopyAcrossAccountsNotSupported StorageErrorCode = "CopyAcrossAccountsNotSupported" + StorageErrorCodeCopyIDMismatch StorageErrorCode = "CopyIdMismatch" + StorageErrorCodeEmptyMetadataKey StorageErrorCode = "EmptyMetadataKey" + StorageErrorCodeFeatureVersionMismatch StorageErrorCode = "FeatureVersionMismatch" + StorageErrorCodeIncrementalCopyBlobMismatch StorageErrorCode = "IncrementalCopyBlobMismatch" + StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed StorageErrorCode = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed" + StorageErrorCodeIncrementalCopySourceMustBeSnapshot StorageErrorCode = "IncrementalCopySourceMustBeSnapshot" + StorageErrorCodeInfiniteLeaseDurationRequired StorageErrorCode = "InfiniteLeaseDurationRequired" + StorageErrorCodeInsufficientAccountPermissions StorageErrorCode = "InsufficientAccountPermissions" + StorageErrorCodeInternalError StorageErrorCode = "InternalError" + StorageErrorCodeInvalidAuthenticationInfo StorageErrorCode = "InvalidAuthenticationInfo" + StorageErrorCodeInvalidBlobOrBlock StorageErrorCode = "InvalidBlobOrBlock" + StorageErrorCodeInvalidBlobTier StorageErrorCode = "InvalidBlobTier" + StorageErrorCodeInvalidBlobType StorageErrorCode = "InvalidBlobType" + StorageErrorCodeInvalidBlockID StorageErrorCode = "InvalidBlockId" + StorageErrorCodeInvalidBlockList StorageErrorCode = "InvalidBlockList" + StorageErrorCodeInvalidHTTPVerb StorageErrorCode = "InvalidHttpVerb" + StorageErrorCodeInvalidHeaderValue StorageErrorCode = "InvalidHeaderValue" + StorageErrorCodeInvalidInput StorageErrorCode = "InvalidInput" + StorageErrorCodeInvalidMD5 StorageErrorCode = "InvalidMd5" + StorageErrorCodeInvalidMetadata StorageErrorCode = "InvalidMetadata" + StorageErrorCodeInvalidOperation StorageErrorCode = "InvalidOperation" + StorageErrorCodeInvalidPageRange StorageErrorCode = "InvalidPageRange" + StorageErrorCodeInvalidQueryParameterValue StorageErrorCode = "InvalidQueryParameterValue" + StorageErrorCodeInvalidRange StorageErrorCode = "InvalidRange" + StorageErrorCodeInvalidResourceName StorageErrorCode = "InvalidResourceName" + StorageErrorCodeInvalidSourceBlobType StorageErrorCode = "InvalidSourceBlobType" + StorageErrorCodeInvalidSourceBlobURL StorageErrorCode = "InvalidSourceBlobUrl" + StorageErrorCodeInvalidURI StorageErrorCode = "InvalidUri" + StorageErrorCodeInvalidVersionForPageBlobOperation StorageErrorCode 
= "InvalidVersionForPageBlobOperation" + StorageErrorCodeInvalidXMLDocument StorageErrorCode = "InvalidXmlDocument" + StorageErrorCodeInvalidXMLNodeValue StorageErrorCode = "InvalidXmlNodeValue" + StorageErrorCodeLeaseAlreadyBroken StorageErrorCode = "LeaseAlreadyBroken" + StorageErrorCodeLeaseAlreadyPresent StorageErrorCode = "LeaseAlreadyPresent" + StorageErrorCodeLeaseIDMismatchWithBlobOperation StorageErrorCode = "LeaseIdMismatchWithBlobOperation" + StorageErrorCodeLeaseIDMismatchWithContainerOperation StorageErrorCode = "LeaseIdMismatchWithContainerOperation" + StorageErrorCodeLeaseIDMismatchWithLeaseOperation StorageErrorCode = "LeaseIdMismatchWithLeaseOperation" + StorageErrorCodeLeaseIDMissing StorageErrorCode = "LeaseIdMissing" + StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired StorageErrorCode = "LeaseIsBreakingAndCannotBeAcquired" + StorageErrorCodeLeaseIsBreakingAndCannotBeChanged StorageErrorCode = "LeaseIsBreakingAndCannotBeChanged" + StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed StorageErrorCode = "LeaseIsBrokenAndCannotBeRenewed" + StorageErrorCodeLeaseLost StorageErrorCode = "LeaseLost" + StorageErrorCodeLeaseNotPresentWithBlobOperation StorageErrorCode = "LeaseNotPresentWithBlobOperation" + StorageErrorCodeLeaseNotPresentWithContainerOperation StorageErrorCode = "LeaseNotPresentWithContainerOperation" + StorageErrorCodeLeaseNotPresentWithLeaseOperation StorageErrorCode = "LeaseNotPresentWithLeaseOperation" + StorageErrorCodeMD5Mismatch StorageErrorCode = "Md5Mismatch" + StorageErrorCodeMaxBlobSizeConditionNotMet StorageErrorCode = "MaxBlobSizeConditionNotMet" + StorageErrorCodeMetadataTooLarge StorageErrorCode = "MetadataTooLarge" + StorageErrorCodeMissingContentLengthHeader StorageErrorCode = "MissingContentLengthHeader" + StorageErrorCodeMissingRequiredHeader StorageErrorCode = "MissingRequiredHeader" + StorageErrorCodeMissingRequiredQueryParameter StorageErrorCode = "MissingRequiredQueryParameter" + StorageErrorCodeMissingRequiredXMLNode StorageErrorCode = "MissingRequiredXmlNode" + StorageErrorCodeMultipleConditionHeadersNotSupported StorageErrorCode = "MultipleConditionHeadersNotSupported" + StorageErrorCodeNoAuthenticationInformation StorageErrorCode = "NoAuthenticationInformation" + StorageErrorCodeNoPendingCopyOperation StorageErrorCode = "NoPendingCopyOperation" + StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob StorageErrorCode = "OperationNotAllowedOnIncrementalCopyBlob" + StorageErrorCodeOperationTimedOut StorageErrorCode = "OperationTimedOut" + StorageErrorCodeOutOfRangeInput StorageErrorCode = "OutOfRangeInput" + StorageErrorCodeOutOfRangeQueryParameterValue StorageErrorCode = "OutOfRangeQueryParameterValue" + StorageErrorCodePendingCopyOperation StorageErrorCode = "PendingCopyOperation" + StorageErrorCodePreviousSnapshotCannotBeNewer StorageErrorCode = "PreviousSnapshotCannotBeNewer" + StorageErrorCodePreviousSnapshotNotFound StorageErrorCode = "PreviousSnapshotNotFound" + StorageErrorCodePreviousSnapshotOperationNotSupported StorageErrorCode = "PreviousSnapshotOperationNotSupported" + StorageErrorCodeRequestBodyTooLarge StorageErrorCode = "RequestBodyTooLarge" + StorageErrorCodeRequestURLFailedToParse StorageErrorCode = "RequestUrlFailedToParse" + StorageErrorCodeResourceAlreadyExists StorageErrorCode = "ResourceAlreadyExists" + StorageErrorCodeResourceNotFound StorageErrorCode = "ResourceNotFound" + StorageErrorCodeResourceTypeMismatch StorageErrorCode = "ResourceTypeMismatch" + StorageErrorCodeSequenceNumberConditionNotMet StorageErrorCode = 
"SequenceNumberConditionNotMet" + StorageErrorCodeSequenceNumberIncrementTooLarge StorageErrorCode = "SequenceNumberIncrementTooLarge" + StorageErrorCodeServerBusy StorageErrorCode = "ServerBusy" + StorageErrorCodeSnapshotCountExceeded StorageErrorCode = "SnapshotCountExceeded" + StorageErrorCodeSnapshotOperationRateExceeded StorageErrorCode = "SnapshotOperationRateExceeded" + StorageErrorCodeSnapshotsPresent StorageErrorCode = "SnapshotsPresent" + StorageErrorCodeSourceConditionNotMet StorageErrorCode = "SourceConditionNotMet" + StorageErrorCodeSystemInUse StorageErrorCode = "SystemInUse" + StorageErrorCodeTargetConditionNotMet StorageErrorCode = "TargetConditionNotMet" + StorageErrorCodeUnauthorizedBlobOverwrite StorageErrorCode = "UnauthorizedBlobOverwrite" + StorageErrorCodeUnsupportedHTTPVerb StorageErrorCode = "UnsupportedHttpVerb" + StorageErrorCodeUnsupportedHeader StorageErrorCode = "UnsupportedHeader" + StorageErrorCodeUnsupportedQueryParameter StorageErrorCode = "UnsupportedQueryParameter" + StorageErrorCodeUnsupportedXMLNode StorageErrorCode = "UnsupportedXmlNode" +) + +// PossibleStorageErrorCodeValues returns the possible values for the StorageErrorCode const type. +func PossibleStorageErrorCodeValues() []StorageErrorCode { + return []StorageErrorCode{ + StorageErrorCodeAccountAlreadyExists, + StorageErrorCodeAccountBeingCreated, + StorageErrorCodeAccountIsDisabled, + StorageErrorCodeAppendPositionConditionNotMet, + StorageErrorCodeAuthenticationFailed, + StorageErrorCodeAuthorizationFailure, + StorageErrorCodeAuthorizationPermissionMismatch, + StorageErrorCodeAuthorizationProtocolMismatch, + StorageErrorCodeAuthorizationResourceTypeMismatch, + StorageErrorCodeAuthorizationServiceMismatch, + StorageErrorCodeAuthorizationSourceIPMismatch, + StorageErrorCodeBlobAlreadyExists, + StorageErrorCodeBlobArchived, + StorageErrorCodeBlobBeingRehydrated, + StorageErrorCodeBlobImmutableDueToPolicy, + StorageErrorCodeBlobNotArchived, + StorageErrorCodeBlobNotFound, + StorageErrorCodeBlobOverwritten, + StorageErrorCodeBlobTierInadequateForContentLength, + StorageErrorCodeBlobUsesCustomerSpecifiedEncryption, + StorageErrorCodeBlockCountExceedsLimit, + StorageErrorCodeBlockListTooLong, + StorageErrorCodeCannotChangeToLowerTier, + StorageErrorCodeCannotVerifyCopySource, + StorageErrorCodeConditionHeadersNotSupported, + StorageErrorCodeConditionNotMet, + StorageErrorCodeContainerAlreadyExists, + StorageErrorCodeContainerBeingDeleted, + StorageErrorCodeContainerDisabled, + StorageErrorCodeContainerNotFound, + StorageErrorCodeContentLengthLargerThanTierLimit, + StorageErrorCodeCopyAcrossAccountsNotSupported, + StorageErrorCodeCopyIDMismatch, + StorageErrorCodeEmptyMetadataKey, + StorageErrorCodeFeatureVersionMismatch, + StorageErrorCodeIncrementalCopyBlobMismatch, + StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed, + StorageErrorCodeIncrementalCopySourceMustBeSnapshot, + StorageErrorCodeInfiniteLeaseDurationRequired, + StorageErrorCodeInsufficientAccountPermissions, + StorageErrorCodeInternalError, + StorageErrorCodeInvalidAuthenticationInfo, + StorageErrorCodeInvalidBlobOrBlock, + StorageErrorCodeInvalidBlobTier, + StorageErrorCodeInvalidBlobType, + StorageErrorCodeInvalidBlockID, + StorageErrorCodeInvalidBlockList, + StorageErrorCodeInvalidHTTPVerb, + StorageErrorCodeInvalidHeaderValue, + StorageErrorCodeInvalidInput, + StorageErrorCodeInvalidMD5, + StorageErrorCodeInvalidMetadata, + StorageErrorCodeInvalidOperation, + StorageErrorCodeInvalidPageRange, + 
StorageErrorCodeInvalidQueryParameterValue, + StorageErrorCodeInvalidRange, + StorageErrorCodeInvalidResourceName, + StorageErrorCodeInvalidSourceBlobType, + StorageErrorCodeInvalidSourceBlobURL, + StorageErrorCodeInvalidURI, + StorageErrorCodeInvalidVersionForPageBlobOperation, + StorageErrorCodeInvalidXMLDocument, + StorageErrorCodeInvalidXMLNodeValue, + StorageErrorCodeLeaseAlreadyBroken, + StorageErrorCodeLeaseAlreadyPresent, + StorageErrorCodeLeaseIDMismatchWithBlobOperation, + StorageErrorCodeLeaseIDMismatchWithContainerOperation, + StorageErrorCodeLeaseIDMismatchWithLeaseOperation, + StorageErrorCodeLeaseIDMissing, + StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired, + StorageErrorCodeLeaseIsBreakingAndCannotBeChanged, + StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed, + StorageErrorCodeLeaseLost, + StorageErrorCodeLeaseNotPresentWithBlobOperation, + StorageErrorCodeLeaseNotPresentWithContainerOperation, + StorageErrorCodeLeaseNotPresentWithLeaseOperation, + StorageErrorCodeMD5Mismatch, + StorageErrorCodeMaxBlobSizeConditionNotMet, + StorageErrorCodeMetadataTooLarge, + StorageErrorCodeMissingContentLengthHeader, + StorageErrorCodeMissingRequiredHeader, + StorageErrorCodeMissingRequiredQueryParameter, + StorageErrorCodeMissingRequiredXMLNode, + StorageErrorCodeMultipleConditionHeadersNotSupported, + StorageErrorCodeNoAuthenticationInformation, + StorageErrorCodeNoPendingCopyOperation, + StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob, + StorageErrorCodeOperationTimedOut, + StorageErrorCodeOutOfRangeInput, + StorageErrorCodeOutOfRangeQueryParameterValue, + StorageErrorCodePendingCopyOperation, + StorageErrorCodePreviousSnapshotCannotBeNewer, + StorageErrorCodePreviousSnapshotNotFound, + StorageErrorCodePreviousSnapshotOperationNotSupported, + StorageErrorCodeRequestBodyTooLarge, + StorageErrorCodeRequestURLFailedToParse, + StorageErrorCodeResourceAlreadyExists, + StorageErrorCodeResourceNotFound, + StorageErrorCodeResourceTypeMismatch, + StorageErrorCodeSequenceNumberConditionNotMet, + StorageErrorCodeSequenceNumberIncrementTooLarge, + StorageErrorCodeServerBusy, + StorageErrorCodeSnapshotCountExceeded, + StorageErrorCodeSnapshotOperationRateExceeded, + StorageErrorCodeSnapshotsPresent, + StorageErrorCodeSourceConditionNotMet, + StorageErrorCodeSystemInUse, + StorageErrorCodeTargetConditionNotMet, + StorageErrorCodeUnauthorizedBlobOverwrite, + StorageErrorCodeUnsupportedHTTPVerb, + StorageErrorCodeUnsupportedHeader, + StorageErrorCodeUnsupportedQueryParameter, + StorageErrorCodeUnsupportedXMLNode, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go new file mode 100644 index 0000000000..683b40a255 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go @@ -0,0 +1,1601 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
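The client methods in these generated files report failures as an *azcore.ResponseError, and the service error code carried by that error lines up with the StorageErrorCode constants above. A hedged sketch of the usual check (helper name hypothetical, not part of the vendored file):

// Illustrative sketch only; not part of the vendored generated sources.
package generated

import (
	"errors"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)

// isContainerAlreadyExists reports whether err is the service signalling that
// the container we tried to create already exists.
func isContainerAlreadyExists(err error) bool {
	var respErr *azcore.ResponseError
	if errors.As(err, &respErr) {
		// ErrorCode holds the x-ms-error-code value returned by the service.
		return respErr.ErrorCode == string(StorageErrorCodeContainerAlreadyExists)
	}
	return false
}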
+ +package generated + +import ( + "context" + "encoding/xml" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +// ContainerClient contains the methods for the Container group. +// Don't use this type directly, use a constructor function instead. +type ContainerClient struct { + internal *azcore.Client + endpoint string +} + +// AcquireLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite +// lease can be between 15 and 60 seconds. A lease duration cannot be changed using +// renew or change. +// - options - ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) AcquireLease(ctx context.Context, duration int32, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientAcquireLeaseResponse, error) { + var err error + req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientAcquireLeaseResponse{}, err + } + resp, err := client.acquireLeaseHandleResponse(httpResp) + return resp, err +} + +// acquireLeaseCreateRequest creates the AcquireLease request. 
+func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, duration int32, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// acquireLeaseHandleResponse handles the AcquireLease response. +func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) (ContainerClientAcquireLeaseResponse, error) { + result := ContainerClientAcquireLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// BreakLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *ContainerClient) BreakLease(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientBreakLeaseResponse, error) { + var err error + req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ContainerClientBreakLeaseResponse{}, err + } + resp, err := client.breakLeaseHandleResponse(httpResp) + return resp, err +} + +// breakLeaseCreateRequest creates the BreakLease request. +func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if options != nil && options.BreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// breakLeaseHandleResponse handles the BreakLease response. 
+func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (ContainerClientBreakLeaseResponse, error) { + result := ContainerClientBreakLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-time"); val != "" { + leaseTime32, err := strconv.ParseInt(val, 10, 32) + leaseTime := int32(leaseTime32) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + result.LeaseTime = &leaseTime + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// ChangeLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - leaseID - Specifies the current lease ID on the resource. +// - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed +// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID +// string formats. +// - options - ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientChangeLeaseResponse, error) { + var err error + req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientChangeLeaseResponse{}, err + } + resp, err := client.changeLeaseHandleResponse(httpResp) + return resp, err +} + +// changeLeaseCreateRequest creates the ChangeLease request. 
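AcquireLease, ChangeLease and BreakLease together implement the container lease lifecycle described in the doc comments above (15 to 60 second or infinite leases). A compact sketch of that flow, under the same in-package caveat as the earlier sketches; the lease IDs are caller-supplied GUID strings and the helper name is made up:

// Illustrative sketch only; not part of the vendored generated sources.
package generated

import "context"

// leaseRoundTrip acquires a 30-second lease, swaps its ID for newID, then
// breaks it immediately (break period 0) and returns the final lease ID.
func leaseRoundTrip(ctx context.Context, client *ContainerClient, proposedID, newID string) (string, error) {
	acq, err := client.AcquireLease(ctx, 30, &ContainerClientAcquireLeaseOptions{ProposedLeaseID: &proposedID}, nil)
	if err != nil {
		return "", err
	}
	chg, err := client.ChangeLease(ctx, *acq.LeaseID, newID, nil, nil)
	if err != nil {
		return "", err
	}
	breakPeriod := int32(0)
	if _, err := client.BreakLease(ctx, &ContainerClientBreakLeaseOptions{BreakPeriod: &breakPeriod}, nil); err != nil {
		return "", err
	}
	return *chg.LeaseID, nil
}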
+func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-lease-action"] = []string{"change"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// changeLeaseHandleResponse handles the ChangeLease response. +func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (ContainerClientChangeLeaseResponse, error) { + result := ContainerClientChangeLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// Create - creates a new container under the specified account. If the container with the same name already exists, the operation +// fails +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. +// - ContainerCPKScopeInfo - ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. 
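Create forwards ContainerCPKScopeInfo to the x-ms-default-encryption-scope and x-ms-deny-encryption-scope-override headers, and options.Access to x-ms-blob-public-access. A rough sketch with the same caveats (the scope value and helper name are invented):

// Illustrative sketch only; not part of the vendored generated sources.
package generated

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

// createScopedContainer creates a container whose blobs default to the given
// encryption scope, cannot override it, and allow anonymous blob-level reads.
func createScopedContainer(ctx context.Context, client *ContainerClient, scope string) error {
	opts := &ContainerClientCreateOptions{
		Access: to.Ptr(PublicAccessTypeBlob), // x-ms-blob-public-access: blob
	}
	cpk := &ContainerCPKScopeInfo{
		DefaultEncryptionScope:         &scope,
		PreventEncryptionScopeOverride: to.Ptr(true),
	}
	_, err := client.Create(ctx, opts, cpk)
	return err
}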
+func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (ContainerClientCreateResponse, error) { + var err error + req, err := client.createCreateRequest(ctx, options, containerCPKScopeInfo) + if err != nil { + return ContainerClientCreateResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientCreateResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientCreateResponse{}, err + } + resp, err := client.createHandleResponse(httpResp) + return resp, err +} + +// createCreateRequest creates the Create request. +func (client *ContainerClient) createCreateRequest(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.Access != nil { + req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if containerCPKScopeInfo != nil && containerCPKScopeInfo.DefaultEncryptionScope != nil { + req.Raw().Header["x-ms-default-encryption-scope"] = []string{*containerCPKScopeInfo.DefaultEncryptionScope} + } + if containerCPKScopeInfo != nil && containerCPKScopeInfo.PreventEncryptionScopeOverride != nil { + req.Raw().Header["x-ms-deny-encryption-scope-override"] = []string{strconv.FormatBool(*containerCPKScopeInfo.PreventEncryptionScopeOverride)} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// createHandleResponse handles the Create response. +func (client *ContainerClient) createHandleResponse(resp *http.Response) (ContainerClientCreateResponse, error) { + result := ContainerClientCreateResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientCreateResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// Delete - operation marks the specified container for deletion. The container and any blobs contained within it are later +// deleted during garbage collection +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2024-08-04 +// - options - ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) Delete(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientDeleteResponse, error) { + var err error + req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return ContainerClientDeleteResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ContainerClientDeleteResponse{}, err + } + resp, err := client.deleteHandleResponse(httpResp) + return resp, err +} + +// deleteCreateRequest creates the Delete request. +func (client *ContainerClient) deleteCreateRequest(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// deleteHandleResponse handles the Delete response. +func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (ContainerClientDeleteResponse, error) { + result := ContainerClientDeleteResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientDeleteResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// FilterBlobs - The Filter Blobs operation enables callers to list blobs in a container whose tags match a given search expression. 
+// Filter blobs searches within the given container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - where - Filters the results to return only blobs whose tags match the specified expression. +// - options - ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method. +func (client *ContainerClient) FilterBlobs(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (ContainerClientFilterBlobsResponse, error) { + var err error + req, err := client.filterBlobsCreateRequest(ctx, where, options) + if err != nil { + return ContainerClientFilterBlobsResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientFilterBlobsResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientFilterBlobsResponse{}, err + } + resp, err := client.filterBlobsHandleResponse(httpResp) + return resp, err +} + +// filterBlobsCreateRequest creates the FilterBlobs request. +func (client *ContainerClient) filterBlobsCreateRequest(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "blobs") + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + reqQP.Set("where", where) + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// filterBlobsHandleResponse handles the FilterBlobs response. +func (client *ContainerClient) filterBlobsHandleResponse(resp *http.Response) (ContainerClientFilterBlobsResponse, error) { + result := ContainerClientFilterBlobsResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientFilterBlobsResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { + return ContainerClientFilterBlobsResponse{}, err + } + return result, nil +} + +// GetAccessPolicy - gets the permissions for the specified container. The permissions indicate whether container data may +// be accessed publicly. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2024-08-04 +// - options - ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *ContainerClient) GetAccessPolicy(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetAccessPolicyResponse, error) { + var err error + req, err := client.getAccessPolicyCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetAccessPolicyResponse{}, err + } + resp, err := client.getAccessPolicyHandleResponse(httpResp) + return resp, err +} + +// getAccessPolicyCreateRequest creates the GetAccessPolicy request. +func (client *ContainerClient) getAccessPolicyCreateRequest(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "acl") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// getAccessPolicyHandleResponse handles the GetAccessPolicy response. 
+func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response) (ContainerClientGetAccessPolicyResponse, error) { + result := ContainerClientGetAccessPolicyResponse{} + if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { + result.BlobPublicAccess = (*PublicAccessType)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result); err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + return result, nil +} + +// GetAccountInfo - Returns the sku name and account kind +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo +// method. +func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (ContainerClientGetAccountInfoResponse, error) { + var err error + req, err := client.getAccountInfoCreateRequest(ctx, options) + if err != nil { + return ContainerClientGetAccountInfoResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientGetAccountInfoResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetAccountInfoResponse{}, err + } + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err +} + +// getAccountInfoCreateRequest creates the GetAccountInfo request. +func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + reqQP.Set("restype", "account") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// getAccountInfoHandleResponse handles the GetAccountInfo response. 
+func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) (ContainerClientGetAccountInfoResponse, error) { + result := ContainerClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetAccountInfoResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" { + isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val) + if err != nil { + return ContainerClientGetAccountInfoResponse{}, err + } + result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// GetProperties - returns all user-defined metadata and system properties for the specified container. The data returned +// does not include the container's list of blobs +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *ContainerClient) GetProperties(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetPropertiesResponse, error) { + var err error + req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetPropertiesResponse{}, err + } + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err +} + +// getPropertiesCreateRequest creates the GetProperties request. 
+func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. +func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) (ContainerClientGetPropertiesResponse, error) { + result := ContainerClientGetPropertiesResponse{} + if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { + result.BlobPublicAccess = (*PublicAccessType)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-default-encryption-scope"); val != "" { + result.DefaultEncryptionScope = &val + } + if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" { + denyEncryptionScopeOverride, err := strconv.ParseBool(val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-has-immutability-policy"); val != "" { + hasImmutabilityPolicy, err := strconv.ParseBool(val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.HasImmutabilityPolicy = &hasImmutabilityPolicy + } + if val := resp.Header.Get("x-ms-has-legal-hold"); val != "" { + hasLegalHold, err := strconv.ParseBool(val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.HasLegalHold = &hasLegalHold + } + if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" { + isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + for hh := 
range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// NewListBlobFlatSegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container +// +// Generated from API version 2024-08-04 +// - options - ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager +// method. +// +// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request. +func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Context, options *ContainerClientListBlobFlatSegmentOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "list") + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) + } + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// listBlobFlatSegmentHandleResponse handles the ListBlobFlatSegment response. 
+func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Response) (ContainerClientListBlobFlatSegmentResponse, error) { + result := ContainerClientListBlobFlatSegmentResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientListBlobFlatSegmentResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsFlatSegmentResponse); err != nil { + return ContainerClientListBlobFlatSegmentResponse{}, err + } + return result, nil +} + +// NewListBlobHierarchySegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container +// +// Generated from API version 2024-08-04 +// - delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that +// acts as a placeholder for all blobs whose names begin with the same substring up to the +// appearance of the delimiter character. The delimiter may be a single character or a string. +// - options - ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager +// method. +func (client *ContainerClient) NewListBlobHierarchySegmentPager(delimiter string, options *ContainerClientListBlobHierarchySegmentOptions) *runtime.Pager[ContainerClientListBlobHierarchySegmentResponse] { + return runtime.NewPager(runtime.PagingHandler[ContainerClientListBlobHierarchySegmentResponse]{ + More: func(page ContainerClientListBlobHierarchySegmentResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *ContainerClientListBlobHierarchySegmentResponse) (ContainerClientListBlobHierarchySegmentResponse, error) { + nextLink := "" + if page != nil { + nextLink = *page.NextMarker + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.ListBlobHierarchySegmentCreateRequest(ctx, delimiter, options) + }, nil) + if err != nil { + return ContainerClientListBlobHierarchySegmentResponse{}, err + } + return client.ListBlobHierarchySegmentHandleResponse(resp) + }, + }) +} + +// ListBlobHierarchySegmentCreateRequest creates the ListBlobHierarchySegment request. 
+func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context.Context, delimiter string, options *ContainerClientListBlobHierarchySegmentOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "list") + reqQP.Set("delimiter", delimiter) + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) + } + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// ListBlobHierarchySegmentHandleResponse handles the ListBlobHierarchySegment response. +func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http.Response) (ContainerClientListBlobHierarchySegmentResponse, error) { + result := ContainerClientListBlobHierarchySegmentResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientListBlobHierarchySegmentResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsHierarchySegmentResponse); err != nil { + return ContainerClientListBlobHierarchySegmentResponse{}, err + } + return result, nil +} + +// ReleaseLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - leaseID - Specifies the current lease ID on the resource. +// - options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *ContainerClient) ReleaseLease(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientReleaseLeaseResponse, error) { + var err error + req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) + if err != nil { + return ContainerClientReleaseLeaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientReleaseLeaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientReleaseLeaseResponse{}, err + } + resp, err := client.releaseLeaseHandleResponse(httpResp) + return resp, err +} + +// releaseLeaseCreateRequest creates the ReleaseLease request. +func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-lease-action"] = []string{"release"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// releaseLeaseHandleResponse handles the ReleaseLease response. +func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) (ContainerClientReleaseLeaseResponse, error) { + result := ContainerClientReleaseLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientReleaseLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientReleaseLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// Rename - Renames an existing container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - sourceContainerName - Required. 
Specifies the name of the container to rename. +// - options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. +func (client *ContainerClient) Rename(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (ContainerClientRenameResponse, error) { + var err error + req, err := client.renameCreateRequest(ctx, sourceContainerName, options) + if err != nil { + return ContainerClientRenameResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientRenameResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRenameResponse{}, err + } + resp, err := client.renameHandleResponse(httpResp) + return resp, err +} + +// renameCreateRequest creates the Rename request. +func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "rename") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-source-container-name"] = []string{sourceContainerName} + if options != nil && options.SourceLeaseID != nil { + req.Raw().Header["x-ms-source-lease-id"] = []string{*options.SourceLeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// renameHandleResponse handles the Rename response. +func (client *ContainerClient) renameHandleResponse(resp *http.Response) (ContainerClientRenameResponse, error) { + result := ContainerClientRenameResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientRenameResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// RenewLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - leaseID - Specifies the current lease ID on the resource. +// - options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *ContainerClient) RenewLease(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientRenewLeaseResponse, error) { + var err error + req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) + if err != nil { + return ContainerClientRenewLeaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientRenewLeaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRenewLeaseResponse{}, err + } + resp, err := client.renewLeaseHandleResponse(httpResp) + return resp, err +} + +// renewLeaseCreateRequest creates the RenewLease request. +func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-lease-action"] = []string{"renew"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// renewLeaseHandleResponse handles the RenewLease response. +func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (ContainerClientRenewLeaseResponse, error) { + result := ContainerClientRenewLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientRenewLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientRenewLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// Restore - Restores a previously-deleted container. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2024-08-04 +// - options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. +func (client *ContainerClient) Restore(ctx context.Context, options *ContainerClientRestoreOptions) (ContainerClientRestoreResponse, error) { + var err error + req, err := client.restoreCreateRequest(ctx, options) + if err != nil { + return ContainerClientRestoreResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientRestoreResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRestoreResponse{}, err + } + resp, err := client.restoreHandleResponse(httpResp) + return resp, err +} + +// restoreCreateRequest creates the Restore request. +func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options *ContainerClientRestoreOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "undelete") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.DeletedContainerName != nil { + req.Raw().Header["x-ms-deleted-container-name"] = []string{*options.DeletedContainerName} + } + if options != nil && options.DeletedContainerVersion != nil { + req.Raw().Header["x-ms-deleted-container-version"] = []string{*options.DeletedContainerVersion} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// restoreHandleResponse handles the Restore response. +func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (ContainerClientRestoreResponse, error) { + result := ContainerClientRestoreResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientRestoreResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SetAccessPolicy - sets the permissions for the specified container. The permissions indicate whether blobs in a container +// may be accessed publicly. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - containerACL - the acls for the container +// - options - ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *ContainerClient) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetAccessPolicyResponse, error) { + var err error + req, err := client.setAccessPolicyCreateRequest(ctx, containerACL, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return ContainerClientSetAccessPolicyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientSetAccessPolicyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientSetAccessPolicyResponse{}, err + } + resp, err := client.setAccessPolicyHandleResponse(httpResp) + return resp, err +} + +// setAccessPolicyCreateRequest creates the SetAccessPolicy request. +func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "acl") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.Access != nil { + req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + type wrapper struct { + XMLName xml.Name `xml:"SignedIdentifiers"` + ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"` + } + if err := runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerACL}); err != nil { + return nil, err + } + return req, nil +} + +// setAccessPolicyHandleResponse handles the SetAccessPolicy response. 
+func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response) (ContainerClientSetAccessPolicyResponse, error) { + result := ContainerClientSetAccessPolicyResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetAccessPolicyResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetAccessPolicyResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SetMetadata - operation sets one or more user-defined name-value pairs for the specified container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) SetMetadata(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetMetadataResponse, error) { + var err error + req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return ContainerClientSetMetadataResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientSetMetadataResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientSetMetadataResponse{}, err + } + resp, err := client.setMetadataHandleResponse(httpResp) + return resp, err +} + +// setMetadataCreateRequest creates the SetMetadata request. 
+func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "metadata") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// setMetadataHandleResponse handles the SetMetadata response. +func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (ContainerClientSetMetadataResponse, error) { + result := ContainerClientSetMetadataResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetMetadataResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetMetadataResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - contentLength - The length of the request. +// - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header +// value: multipart/mixed; boundary=batch_ +// - body - Initial data +// - options - ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. 
+func (client *ContainerClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (ContainerClientSubmitBatchResponse, error) { + var err error + req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) + if err != nil { + return ContainerClientSubmitBatchResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientSubmitBatchResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ContainerClientSubmitBatchResponse{}, err + } + resp, err := client.submitBatchHandleResponse(httpResp) + return resp, err +} + +// submitBatchCreateRequest creates the SubmitBatch request. +func (client *ContainerClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "batch") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + req.Raw().Header["Content-Type"] = []string{multipartContentType} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if err := req.SetBody(body, multipartContentType); err != nil { + return nil, err + } + return req, nil +} + +// submitBatchHandleResponse handles the SubmitBatch response. +func (client *ContainerClient) submitBatchHandleResponse(resp *http.Response) (ContainerClientSubmitBatchResponse, error) { + result := ContainerClientSubmitBatchResponse{Body: resp.Body} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go new file mode 100644 index 0000000000..7530718389 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go @@ -0,0 +1,546 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "time" +) + +// AccessPolicy - An Access policy +type AccessPolicy struct { + // the date-time the policy expires + Expiry *time.Time `xml:"Expiry"` + + // the permissions for the acl policy + Permission *string `xml:"Permission"` + + // the date-time the policy is active + Start *time.Time `xml:"Start"` +} + +// ArrowConfiguration - Groups the settings used for formatting the response if the response should be Arrow formatted. +type ArrowConfiguration struct { + // REQUIRED + Schema []*ArrowField `xml:"Schema>Field"` +} + +// ArrowField - Groups settings regarding specific field of an arrow schema +type ArrowField struct { + // REQUIRED + Type *string `xml:"Type"` + Name *string `xml:"Name"` + Precision *int32 `xml:"Precision"` + Scale *int32 `xml:"Scale"` +} + +type BlobFlatListSegment struct { + // REQUIRED + BlobItems []*BlobItem `xml:"Blob"` +} + +type BlobHierarchyListSegment struct { + // REQUIRED + BlobItems []*BlobItem `xml:"Blob"` + BlobPrefixes []*BlobPrefix `xml:"BlobPrefix"` +} + +// BlobItem - An Azure Storage blob +type BlobItem struct { + // REQUIRED + Deleted *bool `xml:"Deleted"` + + // REQUIRED + Name *string `xml:"Name"` + + // REQUIRED; Properties of a blob + Properties *BlobProperties `xml:"Properties"` + + // REQUIRED + Snapshot *string `xml:"Snapshot"` + + // Blob tags + BlobTags *BlobTags `xml:"Tags"` + HasVersionsOnly *bool `xml:"HasVersionsOnly"` + IsCurrentVersion *bool `xml:"IsCurrentVersion"` + + // Dictionary of + Metadata map[string]*string `xml:"Metadata"` + + // Dictionary of + OrMetadata map[string]*string `xml:"OrMetadata"` + VersionID *string `xml:"VersionId"` +} + +type BlobName struct { + // The name of the blob. + Content *string `xml:",chardata"` + + // Indicates if the blob name is encoded. + Encoded *bool `xml:"Encoded,attr"` +} + +type BlobPrefix struct { + // REQUIRED + Name *string `xml:"Name"` +} + +// BlobProperties - Properties of a blob +type BlobProperties struct { + // REQUIRED + ETag *azcore.ETag `xml:"Etag"` + + // REQUIRED + LastModified *time.Time `xml:"Last-Modified"` + ACL *string `xml:"Acl"` + AccessTier *AccessTier `xml:"AccessTier"` + AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"` + AccessTierInferred *bool `xml:"AccessTierInferred"` + ArchiveStatus *ArchiveStatus `xml:"ArchiveStatus"` + BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"` + BlobType *BlobType `xml:"BlobType"` + CacheControl *string `xml:"Cache-Control"` + ContentDisposition *string `xml:"Content-Disposition"` + ContentEncoding *string `xml:"Content-Encoding"` + ContentLanguage *string `xml:"Content-Language"` + + // Size in bytes + ContentLength *int64 `xml:"Content-Length"` + ContentMD5 []byte `xml:"Content-MD5"` + ContentType *string `xml:"Content-Type"` + CopyCompletionTime *time.Time `xml:"CopyCompletionTime"` + CopyID *string `xml:"CopyId"` + CopyProgress *string `xml:"CopyProgress"` + CopySource *string `xml:"CopySource"` + CopyStatus *CopyStatusType `xml:"CopyStatus"` + CopyStatusDescription *string `xml:"CopyStatusDescription"` + CreationTime *time.Time `xml:"Creation-Time"` + CustomerProvidedKeySHA256 *string `xml:"CustomerProvidedKeySha256"` + DeletedTime *time.Time `xml:"DeletedTime"` + DestinationSnapshot *string `xml:"DestinationSnapshot"` + + // The name of the encryption scope under which the blob is encrypted. 
+ EncryptionScope *string `xml:"EncryptionScope"` + ExpiresOn *time.Time `xml:"Expiry-Time"` + Group *string `xml:"Group"` + ImmutabilityPolicyExpiresOn *time.Time `xml:"ImmutabilityPolicyUntilDate"` + ImmutabilityPolicyMode *ImmutabilityPolicyMode `xml:"ImmutabilityPolicyMode"` + IncrementalCopy *bool `xml:"IncrementalCopy"` + IsSealed *bool `xml:"Sealed"` + LastAccessedOn *time.Time `xml:"LastAccessTime"` + LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` + LeaseState *LeaseStateType `xml:"LeaseState"` + LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` + LegalHold *bool `xml:"LegalHold"` + Owner *string `xml:"Owner"` + Permissions *string `xml:"Permissions"` + + // If an object is in rehydrate pending state then this header is returned with priority of rehydrate. Valid values are High + // and Standard. + RehydratePriority *RehydratePriority `xml:"RehydratePriority"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + ResourceType *string `xml:"ResourceType"` + ServerEncrypted *bool `xml:"ServerEncrypted"` + TagCount *int32 `xml:"TagCount"` +} + +type BlobTag struct { + // REQUIRED + Key *string `xml:"Key"` + + // REQUIRED + Value *string `xml:"Value"` +} + +// BlobTags - Blob tags +type BlobTags struct { + // REQUIRED + BlobTagSet []*BlobTag `xml:"TagSet>Tag"` +} + +// Block - Represents a single block in a block blob. It describes the block's ID and size. +type Block struct { + // REQUIRED; The base64 encoded block ID. + Name *string `xml:"Name"` + + // REQUIRED; The block size in bytes. + Size *int64 `xml:"Size"` +} + +type BlockList struct { + CommittedBlocks []*Block `xml:"CommittedBlocks>Block"` + UncommittedBlocks []*Block `xml:"UncommittedBlocks>Block"` +} + +type BlockLookupList struct { + Committed []*string `xml:"Committed"` + Latest []*string `xml:"Latest"` + Uncommitted []*string `xml:"Uncommitted"` +} + +type ClearRange struct { + // REQUIRED + End *int64 `xml:"End"` + + // REQUIRED + Start *int64 `xml:"Start"` +} + +// ContainerItem - An Azure Storage container +type ContainerItem struct { + // REQUIRED + Name *string `xml:"Name"` + + // REQUIRED; Properties of a container + Properties *ContainerProperties `xml:"Properties"` + Deleted *bool `xml:"Deleted"` + + // Dictionary of + Metadata map[string]*string `xml:"Metadata"` + Version *string `xml:"Version"` +} + +// ContainerProperties - Properties of a container +type ContainerProperties struct { + // REQUIRED + ETag *azcore.ETag `xml:"Etag"` + + // REQUIRED + LastModified *time.Time `xml:"Last-Modified"` + DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"` + DeletedTime *time.Time `xml:"DeletedTime"` + HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` + HasLegalHold *bool `xml:"HasLegalHold"` + + // Indicates if version level worm is enabled on this container. + IsImmutableStorageWithVersioningEnabled *bool `xml:"ImmutableStorageWithVersioningEnabled"` + LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` + LeaseState *LeaseStateType `xml:"LeaseState"` + LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` + PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"` + PublicAccess *PublicAccessType `xml:"PublicAccess"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` +} + +// CORSRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another +// domain. 
Web browsers implement a security restriction known as same-origin policy that +// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin +// domain) to call APIs in another domain +type CORSRule struct { + // REQUIRED; the request headers that the origin domain may specify on the CORS request. + AllowedHeaders *string `xml:"AllowedHeaders"` + + // REQUIRED; The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated) + AllowedMethods *string `xml:"AllowedMethods"` + + // REQUIRED; The origin domains that are permitted to make a request against the storage service via CORS. The origin domain + // is the domain from which the request originates. Note that the origin must be an exact + // case-sensitive match with the origin that the user age sends to the service. You can also use the wildcard character '*' + // to allow all origin domains to make requests via CORS. + AllowedOrigins *string `xml:"AllowedOrigins"` + + // REQUIRED; The response headers that may be sent in the response to the CORS request and exposed by the browser to the request + // issuer + ExposedHeaders *string `xml:"ExposedHeaders"` + + // REQUIRED; The maximum amount time that a browser should cache the preflight OPTIONS request. + MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"` +} + +// DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is delimited text formatted. +type DelimitedTextConfiguration struct { + // The string used to separate columns. + ColumnSeparator *string `xml:"ColumnSeparator"` + + // The string used as an escape character. + EscapeChar *string `xml:"EscapeChar"` + + // The string used to quote a specific field. + FieldQuote *string `xml:"FieldQuote"` + + // Represents whether the data has headers. + HeadersPresent *bool `xml:"HasHeaders"` + + // The string used to separate records. + RecordSeparator *string `xml:"RecordSeparator"` +} + +// FilterBlobItem - Blob info from a Filter Blobs API call +type FilterBlobItem struct { + // REQUIRED + ContainerName *string `xml:"ContainerName"` + + // REQUIRED + Name *string `xml:"Name"` + IsCurrentVersion *bool `xml:"IsCurrentVersion"` + + // Blob tags + Tags *BlobTags `xml:"Tags"` + VersionID *string `xml:"VersionId"` +} + +// FilterBlobSegment - The result of a Filter Blobs API call +type FilterBlobSegment struct { + // REQUIRED + Blobs []*FilterBlobItem `xml:"Blobs>Blob"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + + // REQUIRED + Where *string `xml:"Where"` + NextMarker *string `xml:"NextMarker"` +} + +// GeoReplication - Geo-Replication information for the Secondary Storage Service +type GeoReplication struct { + // REQUIRED; A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available + // for read operations at the secondary. Primary writes after this point in time may or may + // not be available for reads. + LastSyncTime *time.Time `xml:"LastSyncTime"` + + // REQUIRED; The status of the secondary location + Status *BlobGeoReplicationStatus `xml:"Status"` +} + +// JSONTextConfiguration - json text configuration +type JSONTextConfiguration struct { + // The string used to separate records. 
+ RecordSeparator *string `xml:"RecordSeparator"` +} + +// KeyInfo - Key information +type KeyInfo struct { + // REQUIRED; The date-time the key expires in ISO 8601 UTC time + Expiry *string `xml:"Expiry"` + + // REQUIRED; The date-time the key is active in ISO 8601 UTC time + Start *string `xml:"Start"` +} + +// ListBlobsFlatSegmentResponse - An enumeration of blobs +type ListBlobsFlatSegmentResponse struct { + // REQUIRED + ContainerName *string `xml:"ContainerName,attr"` + + // REQUIRED + Segment *BlobFlatListSegment `xml:"Blobs"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// ListBlobsHierarchySegmentResponse - An enumeration of blobs +type ListBlobsHierarchySegmentResponse struct { + // REQUIRED + ContainerName *string `xml:"ContainerName,attr"` + + // REQUIRED + Segment *BlobHierarchyListSegment `xml:"Blobs"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Delimiter *string `xml:"Delimiter"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// ListContainersSegmentResponse - An enumeration of containers +type ListContainersSegmentResponse struct { + // REQUIRED + ContainerItems []*ContainerItem `xml:"Containers>Container"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// Logging - Azure Analytics Logging settings. +type Logging struct { + // REQUIRED; Indicates whether all delete requests should be logged. + Delete *bool `xml:"Delete"` + + // REQUIRED; Indicates whether all read requests should be logged. + Read *bool `xml:"Read"` + + // REQUIRED; the retention policy which determines how long the associated data should persist + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` + + // REQUIRED; The version of Storage Analytics to configure. + Version *string `xml:"Version"` + + // REQUIRED; Indicates whether all write requests should be logged. + Write *bool `xml:"Write"` +} + +// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs +type Metrics struct { + // REQUIRED; Indicates whether metrics are enabled for the Blob service. + Enabled *bool `xml:"Enabled"` + + // Indicates whether metrics should generate summary statistics for called API operations. + IncludeAPIs *bool `xml:"IncludeAPIs"` + + // the retention policy which determines how long the associated data should persist + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` + + // The version of Storage Analytics to configure. + Version *string `xml:"Version"` +} + +// PageList - the list of pages +type PageList struct { + ClearRange []*ClearRange `xml:"ClearRange"` + NextMarker *string `xml:"NextMarker"` + PageRange []*PageRange `xml:"PageRange"` +} + +type PageRange struct { + // REQUIRED + End *int64 `xml:"End"` + + // REQUIRED + Start *int64 `xml:"Start"` +} + +type QueryFormat struct { + // REQUIRED; The quick query format type. + Type *QueryFormatType `xml:"Type"` + + // Groups the settings used for formatting the response if the response should be Arrow formatted. 
+ ArrowConfiguration *ArrowConfiguration `xml:"ArrowConfiguration"` + + // Groups the settings used for interpreting the blob data if the blob is delimited text formatted. + DelimitedTextConfiguration *DelimitedTextConfiguration `xml:"DelimitedTextConfiguration"` + + // json text configuration + JSONTextConfiguration *JSONTextConfiguration `xml:"JsonTextConfiguration"` + + // parquet configuration + ParquetTextConfiguration any `xml:"ParquetTextConfiguration"` +} + +// QueryRequest - Groups the set of query request settings. +type QueryRequest struct { + // REQUIRED; The query expression in SQL. The maximum size of the query expression is 256KiB. + Expression *string `xml:"Expression"` + + // CONSTANT; Required. The type of the provided query expression. + // Field has constant value "SQL", any specified value is ignored. + QueryType *string `xml:"QueryType"` + InputSerialization *QuerySerialization `xml:"InputSerialization"` + OutputSerialization *QuerySerialization `xml:"OutputSerialization"` +} + +type QuerySerialization struct { + // REQUIRED + Format *QueryFormat `xml:"Format"` +} + +// RetentionPolicy - the retention policy which determines how long the associated data should persist +type RetentionPolicy struct { + // REQUIRED; Indicates whether a retention policy is enabled for the storage service + Enabled *bool `xml:"Enabled"` + + // Indicates whether permanent delete is allowed on this storage account. + AllowPermanentDelete *bool `xml:"AllowPermanentDelete"` + + // Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this + // value will be deleted + Days *int32 `xml:"Days"` +} + +// SignedIdentifier - signed identifier +type SignedIdentifier struct { + // REQUIRED; An Access policy + AccessPolicy *AccessPolicy `xml:"AccessPolicy"` + + // REQUIRED; a unique id + ID *string `xml:"Id"` +} + +// StaticWebsite - The properties that enable an account to host a static website +type StaticWebsite struct { + // REQUIRED; Indicates whether this account is hosting a static website + Enabled *bool `xml:"Enabled"` + + // Absolute path of the default index page + DefaultIndexDocumentPath *string `xml:"DefaultIndexDocumentPath"` + + // The absolute path of the custom 404 page + ErrorDocument404Path *string `xml:"ErrorDocument404Path"` + + // The default name of the index page under each directory + IndexDocument *string `xml:"IndexDocument"` +} + +type StorageError struct { + Message *string +} + +// StorageServiceProperties - Storage Service Properties. +type StorageServiceProperties struct { + // The set of CORS rules. + CORS []*CORSRule `xml:"Cors>CorsRule"` + + // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible + // values include version 2008-10-27 and all more recent versions + DefaultServiceVersion *string `xml:"DefaultServiceVersion"` + + // the retention policy which determines how long the associated data should persist + DeleteRetentionPolicy *RetentionPolicy `xml:"DeleteRetentionPolicy"` + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + HourMetrics *Metrics `xml:"HourMetrics"` + + // Azure Analytics Logging settings. 
+ Logging *Logging `xml:"Logging"` + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + MinuteMetrics *Metrics `xml:"MinuteMetrics"` + + // The properties that enable an account to host a static website + StaticWebsite *StaticWebsite `xml:"StaticWebsite"` +} + +// StorageServiceStats - Stats for the storage service. +type StorageServiceStats struct { + // Geo-Replication information for the Secondary Storage Service + GeoReplication *GeoReplication `xml:"GeoReplication"` +} + +// UserDelegationKey - A user delegation key +type UserDelegationKey struct { + // REQUIRED; The date-time the key expires + SignedExpiry *time.Time `xml:"SignedExpiry"` + + // REQUIRED; The Azure Active Directory object ID in GUID format. + SignedOID *string `xml:"SignedOid"` + + // REQUIRED; Abbreviation of the Azure Storage service that accepts the key + SignedService *string `xml:"SignedService"` + + // REQUIRED; The date-time the key is active + SignedStart *time.Time `xml:"SignedStart"` + + // REQUIRED; The Azure Active Directory tenant ID in GUID format + SignedTID *string `xml:"SignedTid"` + + // REQUIRED; The service version that created the key + SignedVersion *string `xml:"SignedVersion"` + + // REQUIRED; The key as a base64 string + Value *string `xml:"Value"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go new file mode 100644 index 0000000000..e2e64d6fff --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go @@ -0,0 +1,489 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "reflect" + "time" +) + +// MarshalXML implements the xml.Marshaller interface for type AccessPolicy. +func (a AccessPolicy) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *dateTimeRFC3339 `xml:"Expiry"` + Start *dateTimeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(&a), + Expiry: (*dateTimeRFC3339)(a.Expiry), + Start: (*dateTimeRFC3339)(a.Start), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type AccessPolicy. +func (a *AccessPolicy) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *dateTimeRFC3339 `xml:"Expiry"` + Start *dateTimeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(a), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + if aux.Expiry != nil && !(*time.Time)(aux.Expiry).IsZero() { + a.Expiry = (*time.Time)(aux.Expiry) + } + if aux.Start != nil && !(*time.Time)(aux.Start).IsZero() { + a.Start = (*time.Time)(aux.Start) + } + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ArrowConfiguration. 
+func (a ArrowConfiguration) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias ArrowConfiguration + aux := &struct { + *alias + Schema *[]*ArrowField `xml:"Schema>Field"` + }{ + alias: (*alias)(&a), + } + if a.Schema != nil { + aux.Schema = &a.Schema + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlobFlatListSegment. +func (b BlobFlatListSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias BlobFlatListSegment + aux := &struct { + *alias + BlobItems *[]*BlobItem `xml:"Blob"` + }{ + alias: (*alias)(&b), + } + if b.BlobItems != nil { + aux.BlobItems = &b.BlobItems + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlobHierarchyListSegment. +func (b BlobHierarchyListSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias BlobHierarchyListSegment + aux := &struct { + *alias + BlobItems *[]*BlobItem `xml:"Blob"` + BlobPrefixes *[]*BlobPrefix `xml:"BlobPrefix"` + }{ + alias: (*alias)(&b), + } + if b.BlobItems != nil { + aux.BlobItems = &b.BlobItems + } + if b.BlobPrefixes != nil { + aux.BlobPrefixes = &b.BlobPrefixes + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlobProperties. +func (b BlobProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias BlobProperties + aux := &struct { + *alias + AccessTierChangeTime *dateTimeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *string `xml:"Content-MD5"` + CopyCompletionTime *dateTimeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *dateTimeRFC1123 `xml:"Creation-Time"` + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + ExpiresOn *dateTimeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *dateTimeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *dateTimeRFC1123 `xml:"LastAccessTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(&b), + AccessTierChangeTime: (*dateTimeRFC1123)(b.AccessTierChangeTime), + CopyCompletionTime: (*dateTimeRFC1123)(b.CopyCompletionTime), + CreationTime: (*dateTimeRFC1123)(b.CreationTime), + DeletedTime: (*dateTimeRFC1123)(b.DeletedTime), + ExpiresOn: (*dateTimeRFC1123)(b.ExpiresOn), + ImmutabilityPolicyExpiresOn: (*dateTimeRFC1123)(b.ImmutabilityPolicyExpiresOn), + LastAccessedOn: (*dateTimeRFC1123)(b.LastAccessedOn), + LastModified: (*dateTimeRFC1123)(b.LastModified), + } + if b.ContentMD5 != nil { + encodedContentMD5 := runtime.EncodeByteArray(b.ContentMD5, runtime.Base64StdFormat) + aux.ContentMD5 = &encodedContentMD5 + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobProperties. 
+func (b *BlobProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias BlobProperties + aux := &struct { + *alias + AccessTierChangeTime *dateTimeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *string `xml:"Content-MD5"` + CopyCompletionTime *dateTimeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *dateTimeRFC1123 `xml:"Creation-Time"` + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + ExpiresOn *dateTimeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *dateTimeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *dateTimeRFC1123 `xml:"LastAccessTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(b), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + if aux.AccessTierChangeTime != nil && !(*time.Time)(aux.AccessTierChangeTime).IsZero() { + b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime) + } + if aux.ContentMD5 != nil { + if err := runtime.DecodeByteArray(*aux.ContentMD5, &b.ContentMD5, runtime.Base64StdFormat); err != nil { + return err + } + } + if aux.CopyCompletionTime != nil && !(*time.Time)(aux.CopyCompletionTime).IsZero() { + b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime) + } + if aux.CreationTime != nil && !(*time.Time)(aux.CreationTime).IsZero() { + b.CreationTime = (*time.Time)(aux.CreationTime) + } + if aux.DeletedTime != nil && !(*time.Time)(aux.DeletedTime).IsZero() { + b.DeletedTime = (*time.Time)(aux.DeletedTime) + } + if aux.ExpiresOn != nil && !(*time.Time)(aux.ExpiresOn).IsZero() { + b.ExpiresOn = (*time.Time)(aux.ExpiresOn) + } + if aux.ImmutabilityPolicyExpiresOn != nil && !(*time.Time)(aux.ImmutabilityPolicyExpiresOn).IsZero() { + b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn) + } + if aux.LastAccessedOn != nil && !(*time.Time)(aux.LastAccessedOn).IsZero() { + b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn) + } + if aux.LastModified != nil && !(*time.Time)(aux.LastModified).IsZero() { + b.LastModified = (*time.Time)(aux.LastModified) + } + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type BlobTags. +func (b BlobTags) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + start.Name.Local = "Tags" + type alias BlobTags + aux := &struct { + *alias + BlobTagSet *[]*BlobTag `xml:"TagSet>Tag"` + }{ + alias: (*alias)(&b), + } + if b.BlobTagSet != nil { + aux.BlobTagSet = &b.BlobTagSet + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlockList. +func (b BlockList) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias BlockList + aux := &struct { + *alias + CommittedBlocks *[]*Block `xml:"CommittedBlocks>Block"` + UncommittedBlocks *[]*Block `xml:"UncommittedBlocks>Block"` + }{ + alias: (*alias)(&b), + } + if b.CommittedBlocks != nil { + aux.CommittedBlocks = &b.CommittedBlocks + } + if b.UncommittedBlocks != nil { + aux.UncommittedBlocks = &b.UncommittedBlocks + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlockLookupList. 
+func (b BlockLookupList) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + start.Name.Local = "BlockList" + type alias BlockLookupList + aux := &struct { + *alias + Committed *[]*string `xml:"Committed"` + Latest *[]*string `xml:"Latest"` + Uncommitted *[]*string `xml:"Uncommitted"` + }{ + alias: (*alias)(&b), + } + if b.Committed != nil { + aux.Committed = &b.Committed + } + if b.Latest != nil { + aux.Latest = &b.Latest + } + if b.Uncommitted != nil { + aux.Uncommitted = &b.Uncommitted + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerItem. +func (c *ContainerItem) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias ContainerItem + aux := &struct { + *alias + Metadata additionalProperties `xml:"Metadata"` + }{ + alias: (*alias)(c), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + c.Metadata = (map[string]*string)(aux.Metadata) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ContainerProperties. +func (c ContainerProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias ContainerProperties + aux := &struct { + *alias + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(&c), + DeletedTime: (*dateTimeRFC1123)(c.DeletedTime), + LastModified: (*dateTimeRFC1123)(c.LastModified), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerProperties. +func (c *ContainerProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias ContainerProperties + aux := &struct { + *alias + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(c), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + if aux.DeletedTime != nil && !(*time.Time)(aux.DeletedTime).IsZero() { + c.DeletedTime = (*time.Time)(aux.DeletedTime) + } + if aux.LastModified != nil && !(*time.Time)(aux.LastModified).IsZero() { + c.LastModified = (*time.Time)(aux.LastModified) + } + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type FilterBlobSegment. +func (f FilterBlobSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias FilterBlobSegment + aux := &struct { + *alias + Blobs *[]*FilterBlobItem `xml:"Blobs>Blob"` + }{ + alias: (*alias)(&f), + } + if f.Blobs != nil { + aux.Blobs = &f.Blobs + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type GeoReplication. +func (g GeoReplication) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias GeoReplication + aux := &struct { + *alias + LastSyncTime *dateTimeRFC1123 `xml:"LastSyncTime"` + }{ + alias: (*alias)(&g), + LastSyncTime: (*dateTimeRFC1123)(g.LastSyncTime), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type GeoReplication. 
+func (g *GeoReplication) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias GeoReplication + aux := &struct { + *alias + LastSyncTime *dateTimeRFC1123 `xml:"LastSyncTime"` + }{ + alias: (*alias)(g), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + if aux.LastSyncTime != nil && !(*time.Time)(aux.LastSyncTime).IsZero() { + g.LastSyncTime = (*time.Time)(aux.LastSyncTime) + } + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ListContainersSegmentResponse. +func (l ListContainersSegmentResponse) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias ListContainersSegmentResponse + aux := &struct { + *alias + ContainerItems *[]*ContainerItem `xml:"Containers>Container"` + }{ + alias: (*alias)(&l), + } + if l.ContainerItems != nil { + aux.ContainerItems = &l.ContainerItems + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type PageList. +func (p PageList) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias PageList + aux := &struct { + *alias + ClearRange *[]*ClearRange `xml:"ClearRange"` + PageRange *[]*PageRange `xml:"PageRange"` + }{ + alias: (*alias)(&p), + } + if p.ClearRange != nil { + aux.ClearRange = &p.ClearRange + } + if p.PageRange != nil { + aux.PageRange = &p.PageRange + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type QueryRequest. +func (q QueryRequest) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + start.Name.Local = "QueryRequest" + type alias QueryRequest + aux := &struct { + *alias + }{ + alias: (*alias)(&q), + } + return enc.EncodeElement(aux, start) +} + +// MarshalJSON implements the json.Marshaller interface for type StorageError. +func (s StorageError) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "Message", s.Message) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type StorageError. +func (s *StorageError) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "Message": + err = unpopulate(val, "Message", &s.Message) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type StorageServiceProperties. +func (s StorageServiceProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias StorageServiceProperties + aux := &struct { + *alias + CORS *[]*CORSRule `xml:"Cors>CorsRule"` + }{ + alias: (*alias)(&s), + } + if s.CORS != nil { + aux.CORS = &s.CORS + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type UserDelegationKey. 
+func (u UserDelegationKey) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias UserDelegationKey + aux := &struct { + *alias + SignedExpiry *dateTimeRFC3339 `xml:"SignedExpiry"` + SignedStart *dateTimeRFC3339 `xml:"SignedStart"` + }{ + alias: (*alias)(&u), + SignedExpiry: (*dateTimeRFC3339)(u.SignedExpiry), + SignedStart: (*dateTimeRFC3339)(u.SignedStart), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type UserDelegationKey. +func (u *UserDelegationKey) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias UserDelegationKey + aux := &struct { + *alias + SignedExpiry *dateTimeRFC3339 `xml:"SignedExpiry"` + SignedStart *dateTimeRFC3339 `xml:"SignedStart"` + }{ + alias: (*alias)(u), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + if aux.SignedExpiry != nil && !(*time.Time)(aux.SignedExpiry).IsZero() { + u.SignedExpiry = (*time.Time)(aux.SignedExpiry) + } + if aux.SignedStart != nil && !(*time.Time)(aux.SignedStart).IsZero() { + u.SignedStart = (*time.Time)(aux.SignedStart) + } + return nil +} + +func populate(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else if !reflect.ValueOf(v).IsNil() { + m[k] = v + } +} + +func unpopulate(data json.RawMessage, fn string, v any) error { + if data == nil || string(data) == "null" { + return nil + } + if err := json.Unmarshal(data, v); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go new file mode 100644 index 0000000000..89d05310a8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go @@ -0,0 +1,1484 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "time" +) + +// AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL +// method. +type AppendBlobClientAppendBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + + // Bytes of source data in the specified range. + SourceRange *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. +type AppendBlobClientAppendBlockOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. +type AppendBlobClientCreateOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. +type AppendBlobClientSealOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock method. 
+type AppendPositionAccessConditions struct { + // Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. + // Append Block will succeed only if the append position is equal to this number. If + // it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). + AppendPosition *int64 + + // Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would + // cause the blob to exceed that limit or if the blob size is already greater than + // the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - + // Precondition Failed). + MaxSize *int64 +} + +// BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. +type BlobClientAbortCopyFromURLOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. +type BlobClientAcquireLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. +type BlobClientBreakLeaseOptions struct { + // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. + BreakPeriod *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. +type BlobClientChangeLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. +type BlobClientCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. + CopySourceTags *BlobCopySourceTags + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. +type BlobClientCreateSnapshotOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. 
See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy +// method. +type BlobClientDeleteImmutabilityPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. +type BlobClientDeleteOptions struct { + // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob + // and all of its snapshots. only: Delete only the blob's snapshots and not the blob + // itself + DeleteSnapshots *DeleteSnapshotsOptionType + + // Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled. + DeleteType *DeleteType + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. +type BlobClientDownloadOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + + // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentCRC64 *bool + + // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the + // range is less than or equal to 4 MB in size. 
+ RangeGetContentMD5 *bool + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. +type BlobClientGetAccountInfoOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. +type BlobClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. +type BlobClientGetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. +type BlobClientQueryOptions struct { + // the query request + QueryRequest *QueryRequest + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. +type BlobClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. +type BlobClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. +type BlobClientSetExpiryOptions struct { + // The time to set the blob to expiry + ExpiresOn *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. 
+type BlobClientSetHTTPHeadersOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy method. +type BlobClientSetImmutabilityPolicyOptions struct { + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. +type BlobClientSetLegalHoldOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. +type BlobClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. +type BlobClientSetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. 
+ RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. +type BlobClientSetTierOptions struct { + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. +type BlobClientStartCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Overrides the sealed state of the destination blob. 
Service version 2019-12-12 and newer. + SealBlob *bool + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. +type BlobClientUndeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +type BlobHTTPHeaders struct { + // Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request. + BlobCacheControl *string + + // Optional. Sets the blob's Content-Disposition header. + BlobContentDisposition *string + + // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read + // request. + BlobContentEncoding *string + + // Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read + // request. + BlobContentLanguage *string + + // Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks + // were validated when each was uploaded. + BlobContentMD5 []byte + + // Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request. + BlobContentType *string +} + +// BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList method. +type BlockBlobClientCommitBlockListOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Optional. Indicates the tier to be set on the blob. 
+ Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. +type BlockBlobClientGetBlockListOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL method. +type BlockBlobClientPutBlobFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Optional, default is true. Indicates if properties from the source blob should be copied. + CopySourceBlobProperties *bool + + // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. + CopySourceTags *BlobCopySourceTags + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional md5 for the body, to be validated by the service. 
+ TransactionalContentMD5 []byte +} + +// BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL method. +type BlockBlobClientStageBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + + // Bytes of source data in the specified range. + SourceRange *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. +type BlockBlobClientStageBlockOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. +type BlockBlobClientUploadOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. +type ContainerClientAcquireLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. +type ContainerClientBreakLeaseOptions struct { + // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. + BreakPeriod *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. +type ContainerClientChangeLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. +type ContainerClientCreateOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + + // Optional. Specifies a user-defined name-value pair associated with the blob. 
If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. +type ContainerClientDeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method. +type ContainerClientFilterBlobsOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []FilterBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method. 
+type ContainerClientGetAccessPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method. +type ContainerClientGetAccountInfoOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. +type ContainerClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager +// method. +type ContainerClientListBlobFlatSegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager +// method. +type ContainerClientListBlobHierarchySegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. +type ContainerClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. +type ContainerClientRenameOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. + SourceLeaseID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. +type ContainerClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. +type ContainerClientRestoreOptions struct { + // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore. + DeletedContainerName *string + + // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore. + DeletedContainerVersion *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy method. +type ContainerClientSetAccessPolicyOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. +type ContainerClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. +type ContainerClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. +type ContainerCPKScopeInfo struct { + // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all + // future writes. + DefaultEncryptionScope *string + + // Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than + // the scope set on the container. + PreventEncryptionScopeOverride *bool +} + +// CPKInfo contains a group of parameters for the BlobClient.Download method. +type CPKInfo struct { + // The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided + // if the x-ms-encryption-key header is provided. + EncryptionAlgorithm *EncryptionAlgorithmType + + // Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption + // is performed with the root account encryption key. For more information, see + // Encryption at Rest for Azure Storage Services. + EncryptionKey *string + + // The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. + EncryptionKeySHA256 *string +} + +// CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +type CPKScopeInfo struct { + // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided + // in the request. If not specified, encryption is performed with the default + // account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. + EncryptionScope *string +} + +// LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +type LeaseAccessConditions struct { + // If specified, the operation only succeeds if the resource's lease is active and matches this ID. + LeaseID *string +} + +// ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +type ModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + IfMatch *azcore.ETag + + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + IfModifiedSince *time.Time + + // Specify an ETag value to operate only on blobs without a matching value. + IfNoneMatch *azcore.ETag + + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + IfTags *string + + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. 
+ IfUnmodifiedSince *time.Time +} + +// PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. +type PageBlobClientClearPagesOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental method. +type PageBlobClientCopyIncrementalOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. +type PageBlobClientCreateOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + BlobSequenceNumber *int64 + + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Optional. Indicates the tier to be set on the page blob. + Tier *PremiumPageBlobAccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager +// method. 
+type PageBlobClientGetPageRangesDiffOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot + // of the target blob. The response will only contain pages that were changed + // between the target blob and its previous snapshot. + PrevSnapshotURL *string + + // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response + // will contain only pages that were changed between target blob and previous + // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot + // specified by prevsnapshot is the older of the two. Note that incremental + // snapshots are currently supported only for blobs created on or after January 1, 2016. + Prevsnapshot *string + + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager method. +type PageBlobClientGetPageRangesOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. 
If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. +type PageBlobClientResizeOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber +// method. +type PageBlobClientUpdateSequenceNumberOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + BlobSequenceNumber *int64 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL method. +type PageBlobClientUploadPagesFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. 
+ SourceContentcrc64 []byte + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. +type PageBlobClientUploadPagesOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages method. +type SequenceNumberAccessConditions struct { + // Specify this header value to operate only on a blob if it has the specified sequence number. + IfSequenceNumberEqualTo *int64 + + // Specify this header value to operate only on a blob if it has a sequence number less than the specified. + IfSequenceNumberLessThan *int64 + + // Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified. + IfSequenceNumberLessThanOrEqualTo *int64 +} + +// ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. +type ServiceClientFilterBlobsOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []FilterBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. +type ServiceClientGetAccountInfoOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. +type ServiceClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. +type ServiceClientGetStatisticsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey method. +type ServiceClientGetUserDelegationKeyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager +// method. +type ServiceClientListContainersSegmentOptions struct { + // Include this parameter to specify that the container's metadata be returned as part of the response body. + Include []ListContainersIncludeType + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. 
If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +type ServiceClientSetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. +type ServiceClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. +type SourceModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + SourceIfMatch *azcore.ETag + + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + SourceIfModifiedSince *time.Time + + // Specify an ETag value to operate only on blobs without a matching value. + SourceIfNoneMatch *azcore.ETag + + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + SourceIfTags *string + + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + SourceIfUnmodifiedSince *time.Time +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go new file mode 100644 index 0000000000..ee46ef9ca0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go @@ -0,0 +1,1292 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +// PageBlobClient contains the methods for the PageBlob group. +// Don't use this type directly, use a constructor function instead. +type PageBlobClient struct { + internal *azcore.Client + endpoint string +} + +// ClearPages - The Clear Pages operation clears a set of pages from a page blob +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - contentLength - The length of the request. +// - options - PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) ClearPages(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientClearPagesResponse, error) { + var err error + req, err := client.clearPagesCreateRequest(ctx, contentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientClearPagesResponse{}, err + } + resp, err := client.clearPagesHandleResponse(httpResp) + return resp, err +} + +// clearPagesCreateRequest creates the ClearPages request. 
+func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-page-write"] = 
[]string{"clear"} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// clearPagesHandleResponse handles the ClearPages response. +func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (PageBlobClientClearPagesResponse, error) { + result := PageBlobClientClearPagesResponse{} + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// CopyIncremental - The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. +// The snapshot is copied such that only the differential changes between the previously copied +// snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can +// be read or copied from as usual. This API is supported since REST version +// 2016-05-31. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// - options - PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *PageBlobClient) CopyIncremental(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCopyIncrementalResponse, error) { + var err error + req, err := client.copyIncrementalCreateRequest(ctx, copySource, options, modifiedAccessConditions) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientCopyIncrementalResponse{}, err + } + resp, err := client.copyIncrementalHandleResponse(httpResp) + return resp, err +} + +// copyIncrementalCreateRequest creates the CopyIncremental request. +func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "incrementalcopy") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// copyIncrementalHandleResponse handles the CopyIncremental response. 
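The CopyIncremental call above needs only the source snapshot URL; the options and access-condition groups may be nil. A minimal calling sketch (illustrative only, written as if it sat next to the generated client; context and fmt imports assumed, and callers outside this internal package would normally go through the azblob wrappers):

// startIncrementalCopy is a hypothetical helper, not part of the generated code.
func startIncrementalCopy(ctx context.Context, client *PageBlobClient, snapshotURL string) error {
	// nil options/conditions simply omit the corresponding optional headers.
	resp, err := client.CopyIncremental(ctx, snapshotURL, nil, nil)
	if err != nil {
		return err
	}
	// The service accepts the copy asynchronously; progress is reported via x-ms-copy-status.
	if resp.CopyID != nil && resp.CopyStatus != nil {
		fmt.Printf("incremental copy %s: %s\n", *resp.CopyID, *resp.CopyStatus)
	}
	return nil
}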
+func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) (PageBlobClientCopyIncrementalResponse, error) { + result := PageBlobClientCopyIncrementalResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// Create - The Create operation creates a new page blob. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - contentLength - The length of the request. +// - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned +// to a 512-byte boundary. +// - options - PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCreateResponse, error) { + var err error + req, err := client.createCreateRequest(ctx, contentLength, blobContentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientCreateResponse{}, err + } + resp, err := client.createHandleResponse(httpResp) + return resp, err +} + +// createCreateRequest creates the Create request. 
+func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if options != nil && options.BlobSequenceNumber != nil { + req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} + } + req.Raw().Header["x-ms-blob-type"] = []string{"PageBlob"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = 
[]string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// createHandleResponse handles the Create response. +func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlobClientCreateResponse, error) { + result := PageBlobClientCreateResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + return result, nil +} + +// NewGetPageRangesPager - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot +// of a page blob +// +// Generated from API version 2024-08-04 +// - options - 
PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) NewGetPageRangesPager(options *PageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *runtime.Pager[PageBlobClientGetPageRangesResponse] { + return runtime.NewPager(runtime.PagingHandler[PageBlobClientGetPageRangesResponse]{ + More: func(page PageBlobClientGetPageRangesResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesResponse) (PageBlobClientGetPageRangesResponse, error) { + nextLink := "" + if page != nil { + nextLink = *page.NextMarker + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.GetPageRangesCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + }, nil) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + return client.GetPageRangesHandleResponse(resp) + }, + }) +} + +// GetPageRangesCreateRequest creates the GetPageRanges request. +func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, options *PageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "pagelist") + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = 
[]string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// GetPageRangesHandleResponse handles the GetPageRanges response. +func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesResponse, error) { + result := PageBlobClientGetPageRangesResponse{} + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + result.BlobContentLength = &blobContentLength + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + return result, nil +} + +// NewGetPageRangesDiffPager - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that +// were changed between target blob and previous snapshot. +// +// Generated from API version 2024-08-04 +// - options - PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
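NewGetPageRangesPager above keeps issuing requests until NextMarker comes back empty, so callers just loop over the pager. A short sketch (illustrative; the More/NextPage helpers come from the azcore runtime pager already used by this file, and context and fmt imports are assumed):

// listPageRanges is a hypothetical helper, not part of the generated code.
func listPageRanges(ctx context.Context, client *PageBlobClient) error {
	pager := client.NewGetPageRangesPager(nil, nil, nil)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return err
		}
		if page.BlobContentLength != nil {
			fmt.Printf("blob size: %d bytes\n", *page.BlobContentLength)
		}
		// page.PageList holds the valid ranges decoded from the XML response body.
	}
	return nil
}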
+func (client *PageBlobClient) NewGetPageRangesDiffPager(options *PageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *runtime.Pager[PageBlobClientGetPageRangesDiffResponse] { + return runtime.NewPager(runtime.PagingHandler[PageBlobClientGetPageRangesDiffResponse]{ + More: func(page PageBlobClientGetPageRangesDiffResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesDiffResponse) (PageBlobClientGetPageRangesDiffResponse, error) { + nextLink := "" + if page != nil { + nextLink = *page.NextMarker + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.GetPageRangesDiffCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + }, nil) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + return client.GetPageRangesDiffHandleResponse(resp) + }, + }) +} + +// GetPageRangesDiffCreateRequest creates the GetPageRangesDiff request. +func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context, options *PageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "pagelist") + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Prevsnapshot != nil { + reqQP.Set("prevsnapshot", *options.Prevsnapshot) + } + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.PrevSnapshotURL != 
nil { + req.Raw().Header["x-ms-previous-snapshot-url"] = []string{*options.PrevSnapshotURL} + } + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// GetPageRangesDiffHandleResponse handles the GetPageRangesDiff response. +func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesDiffResponse, error) { + result := PageBlobClientGetPageRangesDiffResponse{} + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + result.BlobContentLength = &blobContentLength + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + return result, nil +} + +// Resize - Resize the Blob +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned +// to a 512-byte boundary. +// - options - PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *PageBlobClient) Resize(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientResizeResponse, error) { + var err error + req, err := client.resizeCreateRequest(ctx, blobContentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientResizeResponse{}, err + } + resp, err := client.resizeHandleResponse(httpResp) + return resp, err +} + +// resizeCreateRequest creates the Resize request. +func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = 
[]string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// resizeHandleResponse handles the Resize response. +func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlobClientResizeResponse, error) { + result := PageBlobClientResizeResponse{} + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// UpdateSequenceNumber - Update the sequence number of the blob +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to +// page blobs only. This property indicates how the service should modify the blob's sequence number +// - options - PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUpdateSequenceNumberResponse, error) { + var err error + req, err := client.updateSequenceNumberCreateRequest(ctx, sequenceNumberAction, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + resp, err := client.updateSequenceNumberHandleResponse(httpResp) + return resp, err +} + +// updateSequenceNumberCreateRequest creates the UpdateSequenceNumber request. 
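Resize and UpdateSequenceNumber above both target comp=properties: the first changes the blob length (again 512-byte aligned), the second adjusts the sequence number consulted by the x-ms-if-sequence-number-* conditions. A combined sketch; the SequenceNumberActionType value is passed in rather than hard-coded because its constants are defined elsewhere in this package (hypothetical helper, context import assumed):

// growPageBlob is a hypothetical helper, not part of the generated code.
func growPageBlob(ctx context.Context, client *PageBlobClient, newLength int64, action SequenceNumberActionType) error {
	// newLength must be a multiple of 512, just like the size given at creation time.
	if _, err := client.Resize(ctx, newLength, nil, nil, nil, nil, nil); err != nil {
		return err
	}
	// An explicit target value could be set via PageBlobClientUpdateSequenceNumberOptions.BlobSequenceNumber;
	// nil options just apply the chosen action.
	_, err := client.UpdateSequenceNumber(ctx, action, nil, nil, nil)
	return err
}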
+func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.BlobSequenceNumber != nil { + req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-sequence-number-action"] = []string{string(sequenceNumberAction)} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// updateSequenceNumberHandleResponse handles the UpdateSequenceNumber response. 
+func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Response) (PageBlobClientUpdateSequenceNumberResponse, error) { + result := PageBlobClientUpdateSequenceNumberResponse{} + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// UploadPages - The Upload Pages operation writes a range of pages to a page blob +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - contentLength - The length of the request. +// - body - Initial data +// - options - PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUploadPagesResponse, error) { + var err error + req, err := client.uploadPagesCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUploadPagesResponse{}, err + } + resp, err := client.uploadPagesHandleResponse(httpResp) + return resp, err +} + +// uploadPagesCreateRequest creates the UploadPages request. 
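UploadPages above writes the request body into an existing page range; the target range goes through PageBlobClientUploadPagesOptions.Range as a conventional inclusive "bytes=start-end" header and must be 512-byte aligned at both ends. A minimal sketch under those assumptions (hypothetical helper; context, fmt, and io imports assumed):

// uploadAlignedPages is a hypothetical helper, not part of the generated code.
// offset and length are assumed to be multiples of 512, and body must deliver exactly length bytes.
func uploadAlignedPages(ctx context.Context, client *PageBlobClient, body io.ReadSeekCloser, offset, length int64) error {
	rangeHeader := fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)
	opts := &PageBlobClientUploadPagesOptions{Range: &rangeHeader}
	// The remaining parameter groups (lease, CPK, sequence-number and modified-access conditions) stay nil.
	_, err := client.UploadPages(ctx, length, body, opts, nil, nil, nil, nil, nil)
	return err
}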
+func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header["x-ms-if-sequence-number-lt"] = 
[]string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-page-write"] = []string{"update"} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil +} + +// uploadPagesHandleResponse handles the UploadPages response. +func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (PageBlobClientUploadPagesResponse, error) { + result := PageBlobClientUploadPagesResponse{} + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// UploadPagesFromURL - The Upload Pages operation writes a range of pages to a page blob where the contents are read from +// a URL +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - sourceURL - Specify a URL to the copy source. +// - sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header +// and x-ms-range/Range destination range header. +// - contentLength - The length of the request. 
+// - rangeParam - The range of bytes to which the source range would be written. The range should be 512 aligned and range-end +// is required. +// - options - PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL +// method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +func (client *PageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (PageBlobClientUploadPagesFromURLResponse, error) { + var err error + req, err := client.uploadPagesFromURLCreateRequest(ctx, sourceURL, sourceRange, contentLength, rangeParam, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUploadPagesFromURLResponse{}, err + } + resp, err := client.uploadPagesFromURLHandleResponse(httpResp) + return resp, err +} + +// uploadPagesFromURLCreateRequest creates the UploadPagesFromURL request. 
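UploadPagesFromURL above copies the bytes server-side, so the data written is described entirely by sourceRange and rangeParam, both in the inclusive "bytes=start-end" form and covering the same number of bytes. A sketch under those assumptions; note the parameter order differs from UploadPages in that the CPK groups precede the lease conditions (hypothetical helper; context and fmt imports assumed):

// uploadPagesFromRemote is a hypothetical helper, not part of the generated code.
func uploadPagesFromRemote(ctx context.Context, client *PageBlobClient, sourceURL string, offset, length int64) error {
	srcRange := fmt.Sprintf("bytes=0-%d", length-1)
	dstRange := fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)
	// contentLength is 0 on the assumption that this request sends no body of its own;
	// the payload lives at sourceURL and is described by the two range headers.
	_, err := client.UploadPagesFromURL(ctx, sourceURL, srcRange, 0, dstRange, nil, nil, nil, nil, nil, nil, nil)
	return err
}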
+func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} + } + if 
modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-page-write"] = []string{"update"} + req.Raw().Header["x-ms-range"] = []string{rangeParam} + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-source-range"] = []string{sourceRange} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// uploadPagesFromURLHandleResponse handles the UploadPagesFromURL response. 
+func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Response) (PageBlobClientUploadPagesFromURLResponse, error) { + result := PageBlobClientUploadPagesFromURLResponse{} + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_responses.go new file mode 100644 index 0000000000..ff9f3d3de1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_responses.go @@ -0,0 +1,2022 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "io" + "time" +) + +// AppendBlobClientAppendBlockFromURLResponse contains the response from method AppendBlobClient.AppendBlockFromURL. +type AppendBlobClientAppendBlockFromURLResponse struct { + // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. + BlobAppendOffset *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. 
+ ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// AppendBlobClientAppendBlockResponse contains the response from method AppendBlobClient.AppendBlock. +type AppendBlobClientAppendBlockResponse struct { + // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. + BlobAppendOffset *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// AppendBlobClientCreateResponse contains the response from method AppendBlobClient.Create. +type AppendBlobClientCreateResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. 
+ EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// AppendBlobClientSealResponse contains the response from method AppendBlobClient.Seal. +type AppendBlobClientSealResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientAbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL. +type BlobClientAbortCopyFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientAcquireLeaseResponse contains the response from method BlobClient.AcquireLease. +type BlobClientAcquireLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientBreakLeaseResponse contains the response from method BlobClient.BreakLease. +type BlobClientBreakLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. 
+ Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseTime contains the information returned from the x-ms-lease-time header response. + LeaseTime *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientChangeLeaseResponse contains the response from method BlobClient.ChangeLease. +type BlobClientChangeLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientCopyFromURLResponse contains the response from method BlobClient.CopyFromURL. +type BlobClientCopyFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientCreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot. +type BlobClientCreateSnapshotResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. 
+ ETag *azcore.ETag + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Snapshot contains the information returned from the x-ms-snapshot header response. + Snapshot *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientDeleteImmutabilityPolicyResponse contains the response from method BlobClient.DeleteImmutabilityPolicy. +type BlobClientDeleteImmutabilityPolicyResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientDeleteResponse contains the response from method BlobClient.Delete. +type BlobClientDeleteResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientDownloadResponse contains the response from method BlobClient.Download. +type BlobClientDownloadResponse struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response. + BlobContentMD5 []byte + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // Body contains the streaming response. + Body io.ReadCloser + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. 
+ ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentRange contains the information returned from the Content-Range header response. + ContentRange *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // CreationTime contains the information returned from the x-ms-creation-time header response. + CreationTime *time.Time + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // ErrorCode contains the information returned from the x-ms-error-code header response. + ErrorCode *string + + // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiresOn *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *ImmutabilityPolicyMode + + // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. + IsCurrentVersion *bool + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastAccessed contains the information returned from the x-ms-last-access-time header response. + LastAccessed *time.Time + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // Metadata contains the information returned from the x-ms-meta header response. 
+ Metadata map[string]*string + + // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. + ObjectReplicationPolicyID *string + + // ObjectReplicationRules contains the information returned from the x-ms-or header response. + ObjectReplicationRules map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // TagCount contains the information returned from the x-ms-tag-count header response. + TagCount *int64 + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientGetAccountInfoResponse contains the response from method BlobClient.GetAccountInfo. +type BlobClientGetAccountInfoResponse struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // IsHierarchicalNamespaceEnabled contains the information returned from the x-ms-is-hns-enabled header response. + IsHierarchicalNamespaceEnabled *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. + SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientGetPropertiesResponse contains the response from method BlobClient.GetProperties. +type BlobClientGetPropertiesResponse struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // AccessTier contains the information returned from the x-ms-access-tier header response. + AccessTier *string + + // AccessTierChangeTime contains the information returned from the x-ms-access-tier-change-time header response. + AccessTierChangeTime *time.Time + + // AccessTierInferred contains the information returned from the x-ms-access-tier-inferred header response. + AccessTierInferred *bool + + // ArchiveStatus contains the information returned from the x-ms-archive-status header response. + ArchiveStatus *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. 
+ ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // CreationTime contains the information returned from the x-ms-creation-time header response. + CreationTime *time.Time + + // Date contains the information returned from the Date header response. + Date *time.Time + + // DestinationSnapshot contains the information returned from the x-ms-copy-destination-snapshot header response. + DestinationSnapshot *string + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // ExpiresOn contains the information returned from the x-ms-expiry-time header response. + ExpiresOn *time.Time + + // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiresOn *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *ImmutabilityPolicyMode + + // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. + IsCurrentVersion *bool + + // IsIncrementalCopy contains the information returned from the x-ms-incremental-copy header response. + IsIncrementalCopy *bool + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastAccessed contains the information returned from the x-ms-last-access-time header response. + LastAccessed *time.Time + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. 
+ LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. + ObjectReplicationPolicyID *string + + // ObjectReplicationRules contains the information returned from the x-ms-or header response. + ObjectReplicationRules map[string]*string + + // RehydratePriority contains the information returned from the x-ms-rehydrate-priority header response. + RehydratePriority *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // TagCount contains the information returned from the x-ms-tag-count header response. + TagCount *int64 + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientGetTagsResponse contains the response from method BlobClient.GetTags. +type BlobClientGetTagsResponse struct { + // Blob tags + BlobTags + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientQueryResponse contains the response from method BlobClient.Query. +type BlobClientQueryResponse struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response. + BlobContentMD5 []byte + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // Body contains the streaming response. + Body io.ReadCloser + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. 
+ ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentRange contains the information returned from the Content-Range header response. + ContentRange *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease. +type BlobClientReleaseLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientRenewLeaseResponse contains the response from method BlobClient.RenewLease. 
+type BlobClientRenewLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetExpiryResponse contains the response from method BlobClient.SetExpiry. +type BlobClientSetExpiryResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders. +type BlobClientSetHTTPHeadersResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetImmutabilityPolicyResponse contains the response from method BlobClient.SetImmutabilityPolicy. +type BlobClientSetImmutabilityPolicyResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ImmutabilityPolicyExpiry contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiry *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *ImmutabilityPolicyMode + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string +} + +// BlobClientSetLegalHoldResponse contains the response from method BlobClient.SetLegalHold. +type BlobClientSetLegalHoldResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetMetadataResponse contains the response from method BlobClient.SetMetadata. +type BlobClientSetMetadataResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientSetTagsResponse contains the response from method BlobClient.SetTags. +type BlobClientSetTagsResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetTierResponse contains the response from method BlobClient.SetTier. +type BlobClientSetTierResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientStartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL. +type BlobClientStartCopyFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // CopyID contains the information returned from the x-ms-copy-id header response. 
+ CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientUndeleteResponse contains the response from method BlobClient.Undelete. +type BlobClientUndeleteResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlockBlobClientCommitBlockListResponse contains the response from method BlockBlobClient.CommitBlockList. +type BlockBlobClientCommitBlockListResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlockBlobClientGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList. +type BlockBlobClientGetBlockListResponse struct { + BlockList + + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. + BlobContentLength *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentType contains the information returned from the Content-Type header response. 
+ ContentType *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlockBlobClientPutBlobFromURLResponse contains the response from method BlockBlobClient.PutBlobFromURL. +type BlockBlobClientPutBlobFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlockBlobClientStageBlockFromURLResponse contains the response from method BlockBlobClient.StageBlockFromURL. +type BlockBlobClientStageBlockFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlockBlobClientStageBlockResponse contains the response from method BlockBlobClient.StageBlock. +type BlockBlobClientStageBlockResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
+ ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlockBlobClientUploadResponse contains the response from method BlockBlobClient.Upload. +type BlockBlobClientUploadResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// ContainerClientAcquireLeaseResponse contains the response from method ContainerClient.AcquireLease. +type ContainerClientAcquireLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string +} + +// ContainerClientBreakLeaseResponse contains the response from method ContainerClient.BreakLease. +type ContainerClientBreakLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseTime contains the information returned from the x-ms-lease-time header response. + LeaseTime *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientChangeLeaseResponse contains the response from method ContainerClient.ChangeLease. +type ContainerClientChangeLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientCreateResponse contains the response from method ContainerClient.Create. +type ContainerClientCreateResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientDeleteResponse contains the response from method ContainerClient.Delete. +type ContainerClientDeleteResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientFilterBlobsResponse contains the response from method ContainerClient.FilterBlobs. 
+type ContainerClientFilterBlobsResponse struct { + // The result of a Filter Blobs API call + FilterBlobSegment + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy. +type ContainerClientGetAccessPolicyResponse struct { + // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. + BlobPublicAccess *PublicAccessType + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // a collection of signed identifiers + SignedIdentifiers []*SignedIdentifier `xml:"SignedIdentifier"` + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientGetAccountInfoResponse contains the response from method ContainerClient.GetAccountInfo. +type ContainerClientGetAccountInfoResponse struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // IsHierarchicalNamespaceEnabled contains the information returned from the x-ms-is-hns-enabled header response. + IsHierarchicalNamespaceEnabled *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. + SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientGetPropertiesResponse contains the response from method ContainerClient.GetProperties. +type ContainerClientGetPropertiesResponse struct { + // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. + BlobPublicAccess *PublicAccessType + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // DefaultEncryptionScope contains the information returned from the x-ms-default-encryption-scope header response. + DefaultEncryptionScope *string + + // DenyEncryptionScopeOverride contains the information returned from the x-ms-deny-encryption-scope-override header response. 
+ DenyEncryptionScopeOverride *bool + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // HasImmutabilityPolicy contains the information returned from the x-ms-has-immutability-policy header response. + HasImmutabilityPolicy *bool + + // HasLegalHold contains the information returned from the x-ms-has-legal-hold header response. + HasLegalHold *bool + + // IsImmutableStorageWithVersioningEnabled contains the information returned from the x-ms-immutable-storage-with-versioning-enabled + // header response. + IsImmutableStorageWithVersioningEnabled *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientListBlobFlatSegmentResponse contains the response from method ContainerClient.NewListBlobFlatSegmentPager. +type ContainerClientListBlobFlatSegmentResponse struct { + // An enumeration of blobs + ListBlobsFlatSegmentResponse + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientListBlobHierarchySegmentResponse contains the response from method ContainerClient.NewListBlobHierarchySegmentPager. +type ContainerClientListBlobHierarchySegmentResponse struct { + // An enumeration of blobs + ListBlobsHierarchySegmentResponse + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientReleaseLeaseResponse contains the response from method ContainerClient.ReleaseLease. +type ContainerClientReleaseLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. 
+ ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientRenameResponse contains the response from method ContainerClient.Rename. +type ContainerClientRenameResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientRenewLeaseResponse contains the response from method ContainerClient.RenewLease. +type ContainerClientRenewLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientRestoreResponse contains the response from method ContainerClient.Restore. +type ContainerClientRestoreResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientSetAccessPolicyResponse contains the response from method ContainerClient.SetAccessPolicy. +type ContainerClientSetAccessPolicyResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientSetMetadataResponse contains the response from method ContainerClient.SetMetadata. +type ContainerClientSetMetadataResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
+ ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientSubmitBatchResponse contains the response from method ContainerClient.SubmitBatch. +type ContainerClientSubmitBatchResponse struct { + // Body contains the streaming response. + Body io.ReadCloser + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientClearPagesResponse contains the response from method PageBlobClient.ClearPages. +type PageBlobClientClearPagesResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientCopyIncrementalResponse contains the response from method PageBlobClient.CopyIncremental. +type PageBlobClientCopyIncrementalResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientCreateResponse contains the response from method PageBlobClient.Create. 
+type PageBlobClientCreateResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// PageBlobClientGetPageRangesDiffResponse contains the response from method PageBlobClient.NewGetPageRangesDiffPager. +type PageBlobClientGetPageRangesDiffResponse struct { + // the list of pages + PageList + + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. + BlobContentLength *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientGetPageRangesResponse contains the response from method PageBlobClient.NewGetPageRangesPager. +type PageBlobClientGetPageRangesResponse struct { + // the list of pages + PageList + + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. + BlobContentLength *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientResizeResponse contains the response from method PageBlobClient.Resize. 
+type PageBlobClientResizeResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientUpdateSequenceNumberResponse contains the response from method PageBlobClient.UpdateSequenceNumber. +type PageBlobClientUpdateSequenceNumberResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientUploadPagesFromURLResponse contains the response from method PageBlobClient.UploadPagesFromURL. +type PageBlobClientUploadPagesFromURLResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientUploadPagesResponse contains the response from method PageBlobClient.UploadPages. 
+type PageBlobClientUploadPagesResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientFilterBlobsResponse contains the response from method ServiceClient.FilterBlobs. +type ServiceClientFilterBlobsResponse struct { + // The result of a Filter Blobs API call + FilterBlobSegment + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientGetAccountInfoResponse contains the response from method ServiceClient.GetAccountInfo. +type ServiceClientGetAccountInfoResponse struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // IsHierarchicalNamespaceEnabled contains the information returned from the x-ms-is-hns-enabled header response. + IsHierarchicalNamespaceEnabled *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. + SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientGetPropertiesResponse contains the response from method ServiceClient.GetProperties. +type ServiceClientGetPropertiesResponse struct { + // Storage Service Properties. + StorageServiceProperties + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
+ ClientRequestID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientGetStatisticsResponse contains the response from method ServiceClient.GetStatistics. +type ServiceClientGetStatisticsResponse struct { + // Stats for the storage service. + StorageServiceStats + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientGetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey. +type ServiceClientGetUserDelegationKeyResponse struct { + // A user delegation key + UserDelegationKey + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientListContainersSegmentResponse contains the response from method ServiceClient.NewListContainersSegmentPager. +type ServiceClientListContainersSegmentResponse struct { + // An enumeration of containers + ListContainersSegmentResponse + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientSetPropertiesResponse contains the response from method ServiceClient.SetProperties. +type ServiceClientSetPropertiesResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientSubmitBatchResponse contains the response from method ServiceClient.SubmitBatch. +type ServiceClientSubmitBatchResponse struct { + // Body contains the streaming response. + Body io.ReadCloser + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go new file mode 100644 index 0000000000..dcf51a9d56 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go @@ -0,0 +1,583 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +// ServiceClient contains the methods for the Service group. +// Don't use this type directly, use a constructor function instead. +type ServiceClient struct { + internal *azcore.Client + endpoint string +} + +// FilterBlobs - The Filter Blobs operation enables callers to list blobs across all containers whose tags match a given search +// expression. Filter blobs searches across all containers within a storage account but can +// be scoped within the expression to a single container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - where - Filters the results to return only blobs whose tags match the specified expression. +// - options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. +func (client *ServiceClient) FilterBlobs(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) { + var err error + req, err := client.filterBlobsCreateRequest(ctx, where, options) + if err != nil { + return ServiceClientFilterBlobsResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ServiceClientFilterBlobsResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientFilterBlobsResponse{}, err + } + resp, err := client.filterBlobsHandleResponse(httpResp) + return resp, err +} + +// filterBlobsCreateRequest creates the FilterBlobs request.
+func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "blobs") + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + reqQP.Set("where", where) + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// filterBlobsHandleResponse handles the FilterBlobs response. +func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (ServiceClientFilterBlobsResponse, error) { + result := ServiceClientFilterBlobsResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ServiceClientFilterBlobsResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { + return ServiceClientFilterBlobsResponse{}, err + } + return result, nil +} + +// GetAccountInfo - Returns the sku name and account kind +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. +func (client *ServiceClient) GetAccountInfo(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (ServiceClientGetAccountInfoResponse, error) { + var err error + req, err := client.getAccountInfoCreateRequest(ctx, options) + if err != nil { + return ServiceClientGetAccountInfoResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ServiceClientGetAccountInfoResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetAccountInfoResponse{}, err + } + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err +} + +// getAccountInfoCreateRequest creates the GetAccountInfo request. 
+func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + reqQP.Set("restype", "account") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// getAccountInfoHandleResponse handles the GetAccountInfo response. +func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) (ServiceClientGetAccountInfoResponse, error) { + result := ServiceClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ServiceClientGetAccountInfoResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" { + isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val) + if err != nil { + return ServiceClientGetAccountInfoResponse{}, err + } + result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics and +// CORS (Cross-Origin Resource Sharing) rules. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. +func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) { + var err error + req, err := client.getPropertiesCreateRequest(ctx, options) + if err != nil { + return ServiceClientGetPropertiesResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ServiceClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetPropertiesResponse{}, err + } + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err +} + +// getPropertiesCreateRequest creates the GetProperties request. 
+func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, options *ServiceClientGetPropertiesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + reqQP.Set("restype", "service") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. +func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (ServiceClientGetPropertiesResponse, error) { + result := ServiceClientGetPropertiesResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceProperties); err != nil { + return ServiceClientGetPropertiesResponse{}, err + } + return result, nil +} + +// GetStatistics - Retrieves statistics related to replication for the Blob service. It is only available on the secondary +// location endpoint when read-access geo-redundant replication is enabled for the storage account. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. +func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) { + var err error + req, err := client.getStatisticsCreateRequest(ctx, options) + if err != nil { + return ServiceClientGetStatisticsResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ServiceClientGetStatisticsResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetStatisticsResponse{}, err + } + resp, err := client.getStatisticsHandleResponse(httpResp) + return resp, err +} + +// getStatisticsCreateRequest creates the GetStatistics request. 
+func (client *ServiceClient) getStatisticsCreateRequest(ctx context.Context, options *ServiceClientGetStatisticsOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "stats") + reqQP.Set("restype", "service") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// getStatisticsHandleResponse handles the GetStatistics response. +func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (ServiceClientGetStatisticsResponse, error) { + result := ServiceClientGetStatisticsResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ServiceClientGetStatisticsResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceStats); err != nil { + return ServiceClientGetStatisticsResponse{}, err + } + return result, nil +} + +// GetUserDelegationKey - Retrieves a user delegation key for the Blob service. This is only a valid operation when using +// bearer token authentication. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - keyInfo - Key information +// - options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey +// method. +func (client *ServiceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (ServiceClientGetUserDelegationKeyResponse, error) { + var err error + req, err := client.getUserDelegationKeyCreateRequest(ctx, keyInfo, options) + if err != nil { + return ServiceClientGetUserDelegationKeyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ServiceClientGetUserDelegationKeyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetUserDelegationKeyResponse{}, err + } + resp, err := client.getUserDelegationKeyHandleResponse(httpResp) + return resp, err +} + +// getUserDelegationKeyCreateRequest creates the GetUserDelegationKey request. 
+func (client *ServiceClient) getUserDelegationKeyCreateRequest(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "userdelegationkey") + reqQP.Set("restype", "service") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if err := runtime.MarshalAsXML(req, keyInfo); err != nil { + return nil, err + } + return req, nil +} + +// getUserDelegationKeyHandleResponse handles the GetUserDelegationKey response. +func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Response) (ServiceClientGetUserDelegationKeyResponse, error) { + result := ServiceClientGetUserDelegationKeyResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ServiceClientGetUserDelegationKeyResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.UserDelegationKey); err != nil { + return ServiceClientGetUserDelegationKeyResponse{}, err + } + return result, nil +} + +// NewListContainersSegmentPager - The List Containers Segment operation returns a list of the containers under the specified +// account +// +// Generated from API version 2024-08-04 +// - options - ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager +// method. +// +// listContainersSegmentCreateRequest creates the ListContainersSegment request. 
+func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Context, options *ServiceClientListContainersSegmentOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "list") + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// listContainersSegmentHandleResponse handles the ListContainersSegment response. +func (client *ServiceClient) ListContainersSegmentHandleResponse(resp *http.Response) (ServiceClientListContainersSegmentResponse, error) { + result := ServiceClientListContainersSegmentResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.ListContainersSegmentResponse); err != nil { + return ServiceClientListContainersSegmentResponse{}, err + } + return result, nil +} + +// SetProperties - Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics +// and CORS (Cross-Origin Resource Sharing) rules +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - storageServiceProperties - The StorageService properties. +// - options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) { + var err error + req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options) + if err != nil { + return ServiceClientSetPropertiesResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ServiceClientSetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ServiceClientSetPropertiesResponse{}, err + } + resp, err := client.setPropertiesHandleResponse(httpResp) + return resp, err +} + +// setPropertiesCreateRequest creates the SetProperties request. 
+func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + reqQP.Set("restype", "service") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if err := runtime.MarshalAsXML(req, storageServiceProperties); err != nil { + return nil, err + } + return req, nil +} + +// setPropertiesHandleResponse handles the SetProperties response. +func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (ServiceClientSetPropertiesResponse, error) { + result := ServiceClientSetPropertiesResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-08-04 +// - contentLength - The length of the request. +// - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header +// value: multipart/mixed; boundary=batch_ +// - body - Initial data +// - options - ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. +func (client *ServiceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (ServiceClientSubmitBatchResponse, error) { + var err error + req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) + if err != nil { + return ServiceClientSubmitBatchResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ServiceClientSubmitBatchResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ServiceClientSubmitBatchResponse{}, err + } + resp, err := client.submitBatchHandleResponse(httpResp) + return resp, err +} + +// submitBatchCreateRequest creates the SubmitBatch request. 
+func (client *ServiceClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "batch") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + req.Raw().Header["Content-Type"] = []string{multipartContentType} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if err := req.SetBody(body, multipartContentType); err != nil { + return nil, err + } + return req, nil +} + +// submitBatchHandleResponse handles the SubmitBatch response. +func (client *ServiceClient) submitBatchHandleResponse(resp *http.Response) (ServiceClientSubmitBatchResponse, error) { + result := ServiceClientSubmitBatchResponse{Body: resp.Body} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go new file mode 100644 index 0000000000..ee3732ebcc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go @@ -0,0 +1,46 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package generated + +import ( + "strings" + "time" +) + +const ( + dateTimeRFC1123JSON = `"` + time.RFC1123 + `"` +) + +type dateTimeRFC1123 time.Time + +func (t dateTimeRFC1123) MarshalJSON() ([]byte, error) { + b := []byte(time.Time(t).Format(dateTimeRFC1123JSON)) + return b, nil +} + +func (t dateTimeRFC1123) MarshalText() ([]byte, error) { + b := []byte(time.Time(t).Format(time.RFC1123)) + return b, nil +} + +func (t *dateTimeRFC1123) UnmarshalJSON(data []byte) error { + p, err := time.Parse(dateTimeRFC1123JSON, strings.ToUpper(string(data))) + *t = dateTimeRFC1123(p) + return err +} + +func (t *dateTimeRFC1123) UnmarshalText(data []byte) error { + if len(data) == 0 { + return nil + } + p, err := time.Parse(time.RFC1123, string(data)) + *t = dateTimeRFC1123(p) + return err +} + +func (t dateTimeRFC1123) String() string { + return time.Time(t).Format(time.RFC1123) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go new file mode 100644 index 0000000000..e9eac9bcb1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go @@ -0,0 +1,82 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "regexp" + "strings" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +var tzOffsetRegex = regexp.MustCompile(`(?:Z|z|\+|-)(?:\d+:\d+)*"*$`) + +const ( + utcDateTime = "2006-01-02T15:04:05.999999999" + utcDateTimeJSON = `"` + utcDateTime + `"` + utcDateTimeNoT = "2006-01-02 15:04:05.999999999" + utcDateTimeJSONNoT = `"` + utcDateTimeNoT + `"` + dateTimeNoT = `2006-01-02 15:04:05.999999999Z07:00` + dateTimeJSON = `"` + time.RFC3339Nano + `"` + dateTimeJSONNoT = `"` + dateTimeNoT + `"` +) + +type dateTimeRFC3339 time.Time + +func (t dateTimeRFC3339) MarshalJSON() ([]byte, error) { + tt := time.Time(t) + return tt.MarshalJSON() +} + +func (t dateTimeRFC3339) MarshalText() ([]byte, error) { + tt := time.Time(t) + return tt.MarshalText() +} + +func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error { + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { + layout = dateTimeJSON + } else if tzOffset { + layout = dateTimeJSONNoT + } else if hasT { + layout = utcDateTimeJSON + } else { + layout = utcDateTimeJSONNoT + } + return t.Parse(layout, string(data)) +} + +func (t *dateTimeRFC3339) UnmarshalText(data []byte) error { + if len(data) == 0 { + return nil + } + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { + layout = time.RFC3339Nano + } else if tzOffset { + layout = dateTimeNoT + } else if hasT { + layout = utcDateTime + } else { + layout = utcDateTimeNoT + } + return t.Parse(layout, string(data)) +} + +func (t *dateTimeRFC3339) Parse(layout, value string) error { + p, err := time.Parse(layout, strings.ToUpper(value)) + *t = dateTimeRFC3339(p) + return err +} + +func (t dateTimeRFC3339) String() 
string { + return time.Time(t).Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go new file mode 100644 index 0000000000..355d0176b3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go @@ -0,0 +1,50 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "encoding/xml" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "io" + "strings" +) + +type additionalProperties map[string]*string + +// UnmarshalXML implements the xml.Unmarshaler interface for additionalProperties. +func (ap *additionalProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + tokName := "" + tokValue := "" + for { + t, err := d.Token() + if errors.Is(err, io.EOF) { + break + } else if err != nil { + return err + } + switch tt := t.(type) { + case xml.StartElement: + tokName = strings.ToLower(tt.Name.Local) + tokValue = "" + case xml.CharData: + if tokName == "" { + continue + } + tokValue = string(tt) + case xml.EndElement: + if tokName == "" { + continue + } + if *ap == nil { + *ap = additionalProperties{} + } + (*ap)[tokName] = to.Ptr(tokValue) + tokName = "" + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go new file mode 100644 index 0000000000..5c44af34ae --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go @@ -0,0 +1,80 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "context" + "errors" +) + +const ( + DefaultConcurrency = 5 +) + +// BatchTransferOptions identifies options used by DoBatchTransfer. +type BatchTransferOptions struct { + TransferSize int64 + ChunkSize int64 + NumChunks uint64 + Concurrency uint16 + Operation func(ctx context.Context, offset int64, chunkSize int64) error + OperationName string +} + +// DoBatchTransfer helps to execute operations in a batch manner. +// Can be used by callers to customize batch operations (for scenarios that the SDK does not provide) +func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { + if o.ChunkSize == 0 { + return errors.New("ChunkSize cannot be 0") + } + + if o.Concurrency == 0 { + o.Concurrency = DefaultConcurrency // default concurrency + } + + // Prepare and do parallel operations. + operationChannel := make(chan func() error, o.Concurrency) // Create the channel that feeds operations to the 'concurrency' goroutines + operationResponseChannel := make(chan error, o.NumChunks) // Holds each response + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Create the goroutines that process each operation (in parallel).
+ for g := uint16(0); g < o.Concurrency; g++ { + go func() { + for f := range operationChannel { + err := f() + operationResponseChannel <- err + } + }() + } + + // Add each chunk's operation to the channel. + for chunkNum := uint64(0); chunkNum < o.NumChunks; chunkNum++ { + curChunkSize := o.ChunkSize + + if chunkNum == o.NumChunks-1 { // Last chunk + curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total + } + offset := int64(chunkNum) * o.ChunkSize + operationChannel <- func() error { + return o.Operation(ctx, offset, curChunkSize) + } + } + close(operationChannel) + + // Wait for the operations to complete. + var firstErr error = nil + for chunkNum := uint64(0); chunkNum < o.NumChunks; chunkNum++ { + responseError := <-operationResponseChannel + // record the first error (the original error which should cause the other chunks to fail with canceled context) + if responseError != nil && firstErr == nil { + cancel() // As soon as any operation fails, cancel all remaining operation calls + firstErr = responseError + } + } + return firstErr +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go new file mode 100644 index 0000000000..e3aa4a4886 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go @@ -0,0 +1,70 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +type BufferManager[T ~[]byte] interface { + // Acquire returns the channel that contains the pool of buffers. + Acquire() <-chan T + + // Release releases the buffer back to the pool for reuse/cleanup. + Release(T) + + // Grow grows the number of buffers, up to the predefined max. + // It returns the total number of buffers or an error. + // No error is returned if the number of buffers has reached max. + // This is called only from the reading goroutine. + Grow() (int, error) + + // Free cleans up all buffers. + Free() +} + +// mmbPool implements the bufferManager interface. +// it uses anonymous memory mapped files for buffers. +// don't use this type directly, use newMMBPool() instead. 
+type mmbPool struct { + buffers chan Mmb + count int + max int + size int64 +} + +func NewMMBPool(maxBuffers int, bufferSize int64) BufferManager[Mmb] { + return &mmbPool{ + buffers: make(chan Mmb, maxBuffers), + max: maxBuffers, + size: bufferSize, + } +} + +func (pool *mmbPool) Acquire() <-chan Mmb { + return pool.buffers +} + +func (pool *mmbPool) Grow() (int, error) { + if pool.count < pool.max { + buffer, err := NewMMB(pool.size) + if err != nil { + return 0, err + } + pool.buffers <- buffer + pool.count++ + } + return pool.count, nil +} + +func (pool *mmbPool) Release(buffer Mmb) { + pool.buffers <- buffer +} + +func (pool *mmbPool) Free() { + for i := 0; i < pool.count; i++ { + buffer := <-pool.buffers + buffer.Delete() + } + pool.count = 0 +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/bytes_writer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/bytes_writer.go new file mode 100644 index 0000000000..8d4d35bdef --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/bytes_writer.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "errors" +) + +type bytesWriter []byte + +func NewBytesWriter(b []byte) bytesWriter { + return b +} + +func (c bytesWriter) WriteAt(b []byte, off int64) (int, error) { + if off >= int64(len(c)) || off < 0 { + return 0, errors.New("offset value is out of range") + } + + n := copy(c[int(off):], b) + if n < len(b) { + return n, errors.New("not enough space for all bytes") + } + + return n, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go new file mode 100644 index 0000000000..fff61016c8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go @@ -0,0 +1,115 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package shared + +import ( + "errors" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +type storageAuthorizer struct { + scopes []string + tenantID string +} + +func NewStorageChallengePolicy(cred azcore.TokenCredential, audience string, allowHTTP bool) policy.Policy { + s := storageAuthorizer{scopes: []string{audience}} + return runtime.NewBearerTokenPolicy(cred, []string{audience}, &policy.BearerTokenOptions{ + AuthorizationHandler: policy.AuthorizationHandler{ + OnRequest: s.onRequest, + OnChallenge: s.onChallenge, + }, + InsecureAllowCredentialWithHTTP: allowHTTP, + }) +} + +func (s *storageAuthorizer) onRequest(req *policy.Request, authNZ func(policy.TokenRequestOptions) error) error { + return authNZ(policy.TokenRequestOptions{Scopes: s.scopes}) +} + +func (s *storageAuthorizer) onChallenge(req *policy.Request, resp *http.Response, authNZ func(policy.TokenRequestOptions) error) error { + // parse the challenge + err := s.parseChallenge(resp) + if err != nil { + return err + } + // TODO: Set tenantID when policy.TokenRequestOptions supports it. https://github.com/Azure/azure-sdk-for-go/issues/19841 + return authNZ(policy.TokenRequestOptions{Scopes: s.scopes}) +} + +type challengePolicyError struct { + err error +} + +func (c *challengePolicyError) Error() string { + return c.err.Error() +} + +func (*challengePolicyError) NonRetriable() { + // marker method +} + +func (c *challengePolicyError) Unwrap() error { + return c.err +} + +// parses Tenant ID from auth challenge +// https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/authorize +func parseTenant(url string) string { + if url == "" { + return "" + } + parts := strings.Split(url, "/") + if len(parts) > 3 { + tenant := parts[3] + tenant = strings.ReplaceAll(tenant, ",", "") + return tenant + } else { + return "" + } +} + +func (s *storageAuthorizer) parseChallenge(resp *http.Response) error { + authHeader := resp.Header.Get("WWW-Authenticate") + if authHeader == "" { + return &challengePolicyError{err: errors.New("response has no WWW-Authenticate header for challenge authentication")} + } + + // Strip down to auth and resource + // Format is "Bearer authorization_uri=\"\" resource_id=\"\"" + authHeader = strings.ReplaceAll(authHeader, "Bearer ", "") + + parts := strings.Split(authHeader, " ") + + vals := map[string]string{} + for _, part := range parts { + subParts := strings.Split(part, "=") + if len(subParts) == 2 { + stripped := strings.ReplaceAll(subParts[1], "\"", "") + stripped = strings.TrimSuffix(stripped, ",") + vals[subParts[0]] = stripped + } + } + + s.tenantID = parseTenant(vals["authorization_uri"]) + + scope := vals["resource_id"] + if scope == "" { + return &challengePolicyError{err: errors.New("could not find a valid resource in the WWW-Authenticate header")} + } + + if !strings.HasSuffix(scope, "/.default") { + scope += "/.default" + } + s.scopes = []string{scope} + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go new file mode 100644 index 0000000000..cdcadf3116 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go @@ -0,0 +1,38 @@ +//go:build go1.18 && (linux || darwin || dragonfly || freebsd || openbsd || netbsd || solaris || aix)
+// +build go1.18 +// +build linux darwin dragonfly freebsd openbsd netbsd solaris aix + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "fmt" + "os" + "syscall" +) + +// mmb is a memory mapped buffer +type Mmb []byte + +// newMMB creates a new memory mapped buffer with the specified size +func NewMMB(size int64) (Mmb, error) { + prot, flags := syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE + addr, err := syscall.Mmap(-1, 0, int(size), prot, flags) + if err != nil { + return nil, os.NewSyscallError("Mmap", err) + } + return Mmb(addr), nil +} + +// delete cleans up the memory mapped buffer +func (m *Mmb) Delete() { + err := syscall.Munmap(*m) + *m = nil + if err != nil { + // if we get here, there is likely memory corruption. + // please open an issue https://github.com/Azure/azure-sdk-for-go/issues + panic(fmt.Sprintf("Munmap error: %v", err)) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go new file mode 100644 index 0000000000..ef9fdc2a1f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go @@ -0,0 +1,56 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "fmt" + "os" + "reflect" + "syscall" + "unsafe" +) + +// Mmb is a memory mapped buffer +type Mmb []byte + +// NewMMB creates a new memory mapped buffer with the specified size +func NewMMB(size int64) (Mmb, error) { + const InvalidHandleValue = ^uintptr(0) // -1 + + prot, access := uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE) + hMMF, err := syscall.CreateFileMapping(syscall.Handle(InvalidHandleValue), nil, prot, uint32(size>>32), uint32(size&0xffffffff), nil) + if err != nil { + return nil, os.NewSyscallError("CreateFileMapping", err) + } + defer func() { + _ = syscall.CloseHandle(hMMF) + }() + + addr, err := syscall.MapViewOfFile(hMMF, access, 0, 0, uintptr(size)) + if err != nil { + return nil, os.NewSyscallError("MapViewOfFile", err) + } + + m := Mmb{} + h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) + h.Data = addr + h.Len = int(size) + h.Cap = h.Len + return m, nil +} + +// Delete cleans up the memory mapped buffer +func (m *Mmb) Delete() { + addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0]))) + *m = Mmb{} + err := syscall.UnmapViewOfFile(addr) + if err != nil { + // if we get here, there is likely memory corruption. + // please open an issue https://github.com/Azure/azure-sdk-for-go/issues + panic(fmt.Sprintf("UnmapViewOfFile error: %v", err)) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go new file mode 100644 index 0000000000..c8528a2e3e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go @@ -0,0 +1,53 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
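// --- Illustrative sketch (editorial note, not part of the vendored mmf_*.go diffs above) ---
// Mmb is an anonymously memory-mapped []byte (mmap on Unix, CreateFileMapping on
// Windows), and mmbPool, earlier in this diff, bounds how many of them are live by
// handing them out over a buffered channel: Grow allocates lazily up to max, Acquire
// reads from the channel, Release returns a buffer for reuse, and Free drains and
// deletes them. The standalone program below reproduces that control flow but swaps
// make([]byte, ...) in for the memory-mapped NewMMB/Delete pair so it runs anywhere;
// the pool type here is a stand-in, not the SDK's own.
package main

import "fmt"

type pool struct {
	buffers    chan []byte
	count, max int
	size       int64
}

func newPool(max int, size int64) *pool {
	return &pool{buffers: make(chan []byte, max), max: max, size: size}
}

// Grow adds one buffer if the pool is below its cap, like mmbPool.Grow.
func (p *pool) Grow() int {
	if p.count < p.max {
		p.buffers <- make([]byte, p.size) // stand-in for shared.NewMMB(p.size)
		p.count++
	}
	return p.count
}

// Release returns a buffer to the channel for reuse.
func (p *pool) Release(b []byte) { p.buffers <- b }

// Free drains and discards every buffer, like mmbPool.Free calling Delete.
func (p *pool) Free() {
	for i := 0; i < p.count; i++ {
		<-p.buffers // stand-in for buffer.Delete()
	}
	p.count = 0
}

func main() {
	p := newPool(2, 8)
	p.Grow()
	buf := <-p.buffers // Acquire
	copy(buf, "payload")
	p.Release(buf)
	p.Free()
	fmt.Println("buffers freed:", p.count == 0)
}
// --- end of editorial sketch ---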
+ +package shared + +import ( + "errors" + "io" +) + +type SectionWriter struct { + Count int64 + Offset int64 + Position int64 + WriterAt io.WriterAt +} + +func NewSectionWriter(c io.WriterAt, off int64, count int64) *SectionWriter { + return &SectionWriter{ + Count: count, + Offset: off, + WriterAt: c, + } +} + +func (c *SectionWriter) Write(p []byte) (int, error) { + remaining := c.Count - c.Position + + if remaining <= 0 { + return 0, errors.New("end of section reached") + } + + slice := p + + if int64(len(slice)) > remaining { + slice = slice[:remaining] + } + + n, err := c.WriterAt.WriteAt(slice, c.Offset+c.Position) + c.Position += int64(n) + if err != nil { + return n, err + } + + if len(p) > n { + return n, errors.New("not enough space for all bytes") + } + + return n, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go new file mode 100644 index 0000000000..c7922076f3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go @@ -0,0 +1,271 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "errors" + "fmt" + "hash/crc64" + "io" + "net" + "net/url" + "strconv" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +const ( + TokenScope = "https://storage.azure.com/.default" +) + +const ( + HeaderAuthorization = "Authorization" + HeaderXmsDate = "x-ms-date" + HeaderContentLength = "Content-Length" + HeaderContentEncoding = "Content-Encoding" + HeaderContentLanguage = "Content-Language" + HeaderContentType = "Content-Type" + HeaderContentMD5 = "Content-MD5" + HeaderIfModifiedSince = "If-Modified-Since" + HeaderIfMatch = "If-Match" + HeaderIfNoneMatch = "If-None-Match" + HeaderIfUnmodifiedSince = "If-Unmodified-Since" + HeaderRange = "Range" + HeaderXmsVersion = "x-ms-version" + HeaderXmsRequestID = "x-ms-request-id" +) + +const crc64Polynomial uint64 = 0x9A6C9329AC4BC9B5 + +var CRC64Table = crc64.MakeTable(crc64Polynomial) + +// CopyOptions returns a zero-value T if opts is nil. +// If opts is not nil, a copy is made and its address returned. +func CopyOptions[T any](opts *T) *T { + if opts == nil { + return new(T) + } + cp := *opts + return &cp +} + +var errConnectionString = errors.New("connection string is either blank or malformed. The expected connection string " + + "should contain key value pairs separated by semicolons. 
For example 'DefaultEndpointsProtocol=https;AccountName=;" + + "AccountKey=;EndpointSuffix=core.windows.net'") + +type ParsedConnectionString struct { + ServiceURL string + AccountName string + AccountKey string +} + +func ParseConnectionString(connectionString string) (ParsedConnectionString, error) { + const ( + defaultScheme = "https" + defaultSuffix = "core.windows.net" + ) + + connStrMap := make(map[string]string) + connectionString = strings.TrimRight(connectionString, ";") + + splitString := strings.Split(connectionString, ";") + if len(splitString) == 0 { + return ParsedConnectionString{}, errConnectionString + } + for _, stringPart := range splitString { + parts := strings.SplitN(stringPart, "=", 2) + if len(parts) != 2 { + return ParsedConnectionString{}, errConnectionString + } + connStrMap[parts[0]] = parts[1] + } + + protocol, ok := connStrMap["DefaultEndpointsProtocol"] + if !ok { + protocol = defaultScheme + } + + suffix, ok := connStrMap["EndpointSuffix"] + if !ok { + suffix = defaultSuffix + } + + blobEndpoint, has_blobEndpoint := connStrMap["BlobEndpoint"] + accountName, has_accountName := connStrMap["AccountName"] + + var serviceURL string + if has_blobEndpoint { + serviceURL = blobEndpoint + } else if has_accountName { + serviceURL = fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix) + } else { + return ParsedConnectionString{}, errors.New("connection string needs either AccountName or BlobEndpoint") + } + + if !strings.HasSuffix(serviceURL, "/") { + // add a trailing slash to be consistent with the portal + serviceURL += "/" + } + + accountKey, has_accountKey := connStrMap["AccountKey"] + sharedAccessSignature, has_sharedAccessSignature := connStrMap["SharedAccessSignature"] + + if has_accountName && has_accountKey { + return ParsedConnectionString{ + ServiceURL: serviceURL, + AccountName: accountName, + AccountKey: accountKey, + }, nil + } else if has_sharedAccessSignature { + return ParsedConnectionString{ + ServiceURL: fmt.Sprintf("%v?%v", serviceURL, sharedAccessSignature), + }, nil + } else { + return ParsedConnectionString{}, errors.New("connection string needs either AccountKey or SharedAccessSignature") + } + +} + +// SerializeBlobTags converts tags to generated.BlobTags +func SerializeBlobTags(tagsMap map[string]string) *generated.BlobTags { + blobTagSet := make([]*generated.BlobTag, 0) + for key, val := range tagsMap { + newKey, newVal := key, val + blobTagSet = append(blobTagSet, &generated.BlobTag{Key: &newKey, Value: &newVal}) + } + return &generated.BlobTags{BlobTagSet: blobTagSet} +} + +func SerializeBlobTagsToStrPtr(tagsMap map[string]string) *string { + if len(tagsMap) == 0 { + return nil + } + tags := make([]string, 0) + for key, val := range tagsMap { + tags = append(tags, url.QueryEscape(key)+"="+url.QueryEscape(val)) + } + blobTagsString := strings.Join(tags, "&") + return &blobTagsString +} + +func ValidateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) { + if body == nil { // nil body's are "logically" seekable to 0 and are 0 bytes long + return 0, nil + } + + err := validateSeekableStreamAt0(body) + if err != nil { + return 0, err + } + + count, err := body.Seek(0, io.SeekEnd) + if err != nil { + return 0, errors.New("body stream must be seekable") + } + + _, err = body.Seek(0, io.SeekStart) + if err != nil { + return 0, err + } + return count, nil +} + +// return an error if body is not a valid seekable stream at 0 +func validateSeekableStreamAt0(body io.ReadSeeker) error { + if body == nil { // nil body's are 
"logically" seekable to 0 + return nil + } + if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil { + // Help detect programmer error + if err != nil { + return errors.New("body stream must be seekable") + } + return errors.New("body stream must be set to position 0") + } + return nil +} + +func RangeToString(offset, count int64) string { + return "bytes=" + strconv.FormatInt(offset, 10) + "-" + strconv.FormatInt(offset+count-1, 10) +} + +type nopCloser struct { + io.ReadSeeker +} + +func (n nopCloser) Close() error { + return nil +} + +// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. +func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { + return nopCloser{rs} +} + +func GenerateLeaseID(leaseID *string) (*string, error) { + if leaseID == nil { + generatedUuid, err := uuid.New() + if err != nil { + return nil, err + } + leaseID = to.Ptr(generatedUuid.String()) + } + return leaseID, nil +} + +func GetClientOptions[T any](o *T) *T { + if o == nil { + return new(T) + } + return o +} + +// IsIPEndpointStyle checkes if URL's host is IP, in this case the storage account endpoint will be composed as: +// http(s)://IP(:port)/storageaccount/container/... +// As url's Host property, host could be both host or host:port +func IsIPEndpointStyle(host string) bool { + if host == "" { + return false + } + if h, _, err := net.SplitHostPort(host); err == nil { + host = h + } + // For IPv6, there could be case where SplitHostPort fails for cannot finding port. + // In this case, eliminate the '[' and ']' in the URL. + // For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732 + if host[0] == '[' && host[len(host)-1] == ']' { + host = host[1 : len(host)-1] + } + return net.ParseIP(host) != nil +} + +// ReadAtLeast reads from r into buf until it has read at least min bytes. +// It returns the number of bytes copied and an error. +// The EOF error is returned if no bytes were read or +// EOF happened after reading fewer than min bytes. +// If min is greater than the length of buf, ReadAtLeast returns ErrShortBuffer. +// On return, n >= min if and only if err == nil. +// If r returns an error having read at least min bytes, the error is dropped. +// This method is same as io.ReadAtLeast except that it does not +// return io.ErrUnexpectedEOF when fewer than min bytes are read. +func ReadAtLeast(r io.Reader, buf []byte, min int) (n int, err error) { + if len(buf) < min { + return 0, io.ErrShortBuffer + } + for n < min && err == nil { + var nn int + nn, err = r.Read(buf[n:]) + n += nn + } + if n >= min { + err = nil + } + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go new file mode 100644 index 0000000000..4069bb1320 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go @@ -0,0 +1,229 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package sas + +import ( + "bytes" + "errors" + "fmt" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. 
+type SharedKeyCredential = exported.SharedKeyCredential + +// UserDelegationCredential contains an account's name and its user delegation key. +type UserDelegationCredential = exported.UserDelegationCredential + +// AccountSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas +type AccountSignatureValues struct { + Version string `param:"sv"` // If not specified, this format to SASVersion + Protocol Protocol `param:"spr"` // See the SASProtocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + Permissions string `param:"sp"` // Create by initializing AccountPermissions and then call String() + IPRange IPRange `param:"sip"` + ResourceTypes string `param:"srt"` // Create by initializing AccountResourceTypes and then call String() + EncryptionScope string `param:"ses"` +} + +// SignWithSharedKey uses an account's shared key credential to sign this signature values to produce +// the proper SAS query parameters. +func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS + if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" { + return QueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") + } + if v.Version == "" { + v.Version = Version + } + perms, err := parseAccountPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + + resources, err := parseAccountResourceTypes(v.ResourceTypes) + if err != nil { + return QueryParameters{}, err + } + v.ResourceTypes = resources.String() + + startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, time.Time{}) + + stringToSign := strings.Join([]string{ + sharedKeyCredential.AccountName(), + v.Permissions, + "b", // blob service + v.ResourceTypes, + startTime, + expiryTime, + v.IPRange.String(), + string(v.Protocol), + v.Version, + v.EncryptionScope, + ""}, // That is right, the account SAS requires a terminating extra newline + "\n") + + signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign) + if err != nil { + return QueryParameters{}, err + } + p := QueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + encryptionScope: v.EncryptionScope, + + // Account-specific SAS parameters + services: "b", // will always be "b" + resourceTypes: v.ResourceTypes, + + // Calculated SAS signature + signature: signature, + } + + return p, nil +} + +// AccountPermissions type simplifies creating the permissions string for an Azure Storage Account SAS. +// Initialize an instance of this type and then call its String method to set AccountSignatureValues' Permissions field. +type AccountPermissions struct { + Read, Write, Delete, DeletePreviousVersion, PermanentDelete, List, Add, Create, Update, Process, FilterByTags, Tag, SetImmutabilityPolicy bool +} + +// String produces the SAS permissions string for an Azure Storage account. +// Call this method to set AccountSignatureValues' Permissions field. 
+func (p *AccountPermissions) String() string { + var buffer bytes.Buffer + if p.Read { + buffer.WriteRune('r') + } + if p.Write { + buffer.WriteRune('w') + } + if p.Delete { + buffer.WriteRune('d') + } + if p.DeletePreviousVersion { + buffer.WriteRune('x') + } + if p.PermanentDelete { + buffer.WriteRune('y') + } + if p.List { + buffer.WriteRune('l') + } + if p.Add { + buffer.WriteRune('a') + } + if p.Create { + buffer.WriteRune('c') + } + if p.Update { + buffer.WriteRune('u') + } + if p.Process { + buffer.WriteRune('p') + } + if p.FilterByTags { + buffer.WriteRune('f') + } + if p.Tag { + buffer.WriteRune('t') + } + if p.SetImmutabilityPolicy { + buffer.WriteRune('i') + } + return buffer.String() +} + +// Parse initializes the AccountPermissions' fields from a string. +func parseAccountPermissions(s string) (AccountPermissions, error) { + p := AccountPermissions{} // Clear out the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'x': + p.DeletePreviousVersion = true + case 'y': + p.PermanentDelete = true + case 'l': + p.List = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'u': + p.Update = true + case 'p': + p.Process = true + case 't': + p.Tag = true + case 'f': + p.FilterByTags = true + case 'i': + p.SetImmutabilityPolicy = true + default: + return AccountPermissions{}, fmt.Errorf("invalid permission character: '%v'", r) + } + } + return p, nil +} + +// AccountResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS. +// Initialize an instance of this type and then call its String method to set AccountSignatureValues' ResourceTypes field. +type AccountResourceTypes struct { + Service, Container, Object bool +} + +// String produces the SAS resource types string for an Azure Storage account. +// Call this method to set AccountSignatureValues' ResourceTypes field. +func (rt *AccountResourceTypes) String() string { + var buffer bytes.Buffer + if rt.Service { + buffer.WriteRune('s') + } + if rt.Container { + buffer.WriteRune('c') + } + if rt.Object { + buffer.WriteRune('o') + } + return buffer.String() +} + +// parseAccountResourceTypes initializes the AccountResourceTypes' fields from a string. +func parseAccountResourceTypes(s string) (AccountResourceTypes, error) { + rt := AccountResourceTypes{} + for _, r := range s { + switch r { + case 's': + rt.Service = true + case 'c': + rt.Container = true + case 'o': + rt.Object = true + default: + return AccountResourceTypes{}, fmt.Errorf("invalid resource type character: '%v'", r) + } + } + return rt, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go new file mode 100644 index 0000000000..20f9875a96 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go @@ -0,0 +1,452 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package sas + +import ( + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "net" + "net/url" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" +) + +// TimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time. 
+const ( + TimeFormat = "2006-01-02T15:04:05Z" // "2017-07-27T00:00:00Z" // ISO 8601 +) + +var ( + // Version is the default version encoded in the SAS token. + Version = generated.ServiceVersion +) + +// TimeFormats ISO 8601 format. +// Please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details. +var timeFormats = []string{"2006-01-02T15:04:05.0000000Z", TimeFormat, "2006-01-02T15:04Z", "2006-01-02"} + +// Protocol indicates the http/https. +type Protocol string + +const ( + // ProtocolHTTPS can be specified for a SAS protocol. + ProtocolHTTPS Protocol = "https" + + // ProtocolHTTPSandHTTP can be specified for a SAS protocol. + ProtocolHTTPSandHTTP Protocol = "https,http" +) + +// FormatTimesForSigning converts a time.Time to a snapshotTimeFormat string suitable for a +// Field's StartTime or ExpiryTime fields. Returns "" if value.IsZero(). +func formatTimesForSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) { + ss := "" + if !startTime.IsZero() { + ss = formatTimeWithDefaultFormat(&startTime) + } + se := "" + if !expiryTime.IsZero() { + se = formatTimeWithDefaultFormat(&expiryTime) + } + sh := "" + if !snapshotTime.IsZero() { + sh = snapshotTime.Format(exported.SnapshotTimeFormat) + } + return ss, se, sh +} + +// formatTimeWithDefaultFormat format time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ". +func formatTimeWithDefaultFormat(t *time.Time) string { + return formatTime(t, TimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used +} + +// formatTime format time with given format, use ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" by default. +func formatTime(t *time.Time, format string) string { + if format != "" { + return t.Format(format) + } + return t.Format(TimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used +} + +// ParseTime try to parse a SAS time string. +func parseTime(val string) (t time.Time, timeFormat string, err error) { + for _, sasTimeFormat := range timeFormats { + t, err = time.Parse(sasTimeFormat, val) + if err == nil { + timeFormat = sasTimeFormat + break + } + } + + if err != nil { + err = errors.New("fail to parse time with IOS 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details") + } + + return +} + +// IPRange represents a SAS IP range's start IP and (optionally) end IP. +type IPRange struct { + Start net.IP // Not specified if length = 0 + End net.IP // Not specified if length = 0 +} + +// String returns a string representation of an IPRange. +func (ipr *IPRange) String() string { + if len(ipr.Start) == 0 { + return "" + } + start := ipr.Start.String() + if len(ipr.End) == 0 { + return start + } + return start + "-" + ipr.End.String() +} + +// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas + +// QueryParameters object represents the components that make up an Azure Storage SAS' query parameters. +// You parse a map of query parameters into its fields by calling NewQueryParameters(). You add the components +// to a query parameter map by calling AddToValues(). +// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type. +// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues). +type QueryParameters struct { + // All members are immutable or values so copies of this struct are goroutine-safe. 
+ version string `param:"sv"` + services string `param:"ss"` + resourceTypes string `param:"srt"` + protocol Protocol `param:"spr"` + startTime time.Time `param:"st"` + expiryTime time.Time `param:"se"` + snapshotTime time.Time `param:"snapshot"` + ipRange IPRange `param:"sip"` + identifier string `param:"si"` + resource string `param:"sr"` + permissions string `param:"sp"` + signature string `param:"sig"` + cacheControl string `param:"rscc"` + contentDisposition string `param:"rscd"` + contentEncoding string `param:"rsce"` + contentLanguage string `param:"rscl"` + contentType string `param:"rsct"` + signedOID string `param:"skoid"` + signedTID string `param:"sktid"` + signedStart time.Time `param:"skt"` + signedService string `param:"sks"` + signedExpiry time.Time `param:"ske"` + signedVersion string `param:"skv"` + signedDirectoryDepth string `param:"sdd"` + authorizedObjectID string `param:"saoid"` + unauthorizedObjectID string `param:"suoid"` + correlationID string `param:"scid"` + encryptionScope string `param:"ses"` + // private member used for startTime and expiryTime formatting. + stTimeFormat string + seTimeFormat string +} + +// AuthorizedObjectID returns authorizedObjectID. +func (p *QueryParameters) AuthorizedObjectID() string { + return p.authorizedObjectID +} + +// UnauthorizedObjectID returns unauthorizedObjectID. +func (p *QueryParameters) UnauthorizedObjectID() string { + return p.unauthorizedObjectID +} + +// SignedCorrelationID returns signedCorrelationID. +func (p *QueryParameters) SignedCorrelationID() string { + return p.correlationID +} + +// EncryptionScope returns encryptionScope +func (p *QueryParameters) EncryptionScope() string { + return p.encryptionScope +} + +// SignedOID returns signedOID. +func (p *QueryParameters) SignedOID() string { + return p.signedOID +} + +// SignedTID returns signedTID. +func (p *QueryParameters) SignedTID() string { + return p.signedTID +} + +// SignedStart returns signedStart. +func (p *QueryParameters) SignedStart() time.Time { + return p.signedStart +} + +// SignedExpiry returns signedExpiry. +func (p *QueryParameters) SignedExpiry() time.Time { + return p.signedExpiry +} + +// SignedService returns signedService. +func (p *QueryParameters) SignedService() string { + return p.signedService +} + +// SignedVersion returns signedVersion. +func (p *QueryParameters) SignedVersion() string { + return p.signedVersion +} + +// SnapshotTime returns snapshotTime. +func (p *QueryParameters) SnapshotTime() time.Time { + return p.snapshotTime +} + +// Version returns version. +func (p *QueryParameters) Version() string { + return p.version +} + +// Services returns services. +func (p *QueryParameters) Services() string { + return p.services +} + +// ResourceTypes returns resourceTypes. +func (p *QueryParameters) ResourceTypes() string { + return p.resourceTypes +} + +// Protocol returns protocol. +func (p *QueryParameters) Protocol() Protocol { + return p.protocol +} + +// StartTime returns startTime. +func (p *QueryParameters) StartTime() time.Time { + return p.startTime +} + +// ExpiryTime returns expiryTime. +func (p *QueryParameters) ExpiryTime() time.Time { + return p.expiryTime +} + +// IPRange returns ipRange. +func (p *QueryParameters) IPRange() IPRange { + return p.ipRange +} + +// Identifier returns identifier. +func (p *QueryParameters) Identifier() string { + return p.identifier +} + +// Resource returns resource. +func (p *QueryParameters) Resource() string { + return p.resource +} + +// Permissions returns permissions. 
+func (p *QueryParameters) Permissions() string { + return p.permissions +} + +// Signature returns signature. +func (p *QueryParameters) Signature() string { + return p.signature +} + +// CacheControl returns cacheControl. +func (p *QueryParameters) CacheControl() string { + return p.cacheControl +} + +// ContentDisposition returns contentDisposition. +func (p *QueryParameters) ContentDisposition() string { + return p.contentDisposition +} + +// ContentEncoding returns contentEncoding. +func (p *QueryParameters) ContentEncoding() string { + return p.contentEncoding +} + +// ContentLanguage returns contentLanguage. +func (p *QueryParameters) ContentLanguage() string { + return p.contentLanguage +} + +// ContentType returns contentType. +func (p *QueryParameters) ContentType() string { + return p.contentType +} + +// SignedDirectoryDepth returns signedDirectoryDepth. +func (p *QueryParameters) SignedDirectoryDepth() string { + return p.signedDirectoryDepth +} + +// Encode encodes the SAS query parameters into URL encoded form sorted by key. +func (p *QueryParameters) Encode() string { + v := url.Values{} + + if p.version != "" { + v.Add("sv", p.version) + } + if p.services != "" { + v.Add("ss", p.services) + } + if p.resourceTypes != "" { + v.Add("srt", p.resourceTypes) + } + if p.protocol != "" { + v.Add("spr", string(p.protocol)) + } + if !p.startTime.IsZero() { + v.Add("st", formatTime(&(p.startTime), p.stTimeFormat)) + } + if !p.expiryTime.IsZero() { + v.Add("se", formatTime(&(p.expiryTime), p.seTimeFormat)) + } + if len(p.ipRange.Start) > 0 { + v.Add("sip", p.ipRange.String()) + } + if p.identifier != "" { + v.Add("si", p.identifier) + } + if p.resource != "" { + v.Add("sr", p.resource) + } + if p.permissions != "" { + v.Add("sp", p.permissions) + } + if p.signedOID != "" { + v.Add("skoid", p.signedOID) + v.Add("sktid", p.signedTID) + v.Add("skt", p.signedStart.Format(TimeFormat)) + v.Add("ske", p.signedExpiry.Format(TimeFormat)) + v.Add("sks", p.signedService) + v.Add("skv", p.signedVersion) + } + if p.signature != "" { + v.Add("sig", p.signature) + } + if p.cacheControl != "" { + v.Add("rscc", p.cacheControl) + } + if p.contentDisposition != "" { + v.Add("rscd", p.contentDisposition) + } + if p.contentEncoding != "" { + v.Add("rsce", p.contentEncoding) + } + if p.contentLanguage != "" { + v.Add("rscl", p.contentLanguage) + } + if p.contentType != "" { + v.Add("rsct", p.contentType) + } + if p.signedDirectoryDepth != "" { + v.Add("sdd", p.signedDirectoryDepth) + } + if p.authorizedObjectID != "" { + v.Add("saoid", p.authorizedObjectID) + } + if p.unauthorizedObjectID != "" { + v.Add("suoid", p.unauthorizedObjectID) + } + if p.correlationID != "" { + v.Add("scid", p.correlationID) + } + if p.encryptionScope != "" { + v.Add("ses", p.encryptionScope) + } + + return v.Encode() +} + +// NewQueryParameters creates and initializes a QueryParameters object based on the +// query parameter map's passed-in values. If deleteSASParametersFromValues is true, +// all SAS-related query parameters are removed from the passed-in map. If +// deleteSASParametersFromValues is false, the map passed-in map is unaltered. 
+func NewQueryParameters(values url.Values, deleteSASParametersFromValues bool) QueryParameters { + p := QueryParameters{} + for k, v := range values { + val := v[0] + isSASKey := true + switch strings.ToLower(k) { + case "sv": + p.version = val + case "ss": + p.services = val + case "srt": + p.resourceTypes = val + case "spr": + p.protocol = Protocol(val) + case "snapshot": + p.snapshotTime, _ = time.Parse(exported.SnapshotTimeFormat, val) + case "st": + p.startTime, p.stTimeFormat, _ = parseTime(val) + case "se": + p.expiryTime, p.seTimeFormat, _ = parseTime(val) + case "sip": + dashIndex := strings.Index(val, "-") + if dashIndex == -1 { + p.ipRange.Start = net.ParseIP(val) + } else { + p.ipRange.Start = net.ParseIP(val[:dashIndex]) + p.ipRange.End = net.ParseIP(val[dashIndex+1:]) + } + case "si": + p.identifier = val + case "sr": + p.resource = val + case "sp": + p.permissions = val + case "sig": + p.signature = val + case "rscc": + p.cacheControl = val + case "rscd": + p.contentDisposition = val + case "rsce": + p.contentEncoding = val + case "rscl": + p.contentLanguage = val + case "rsct": + p.contentType = val + case "skoid": + p.signedOID = val + case "sktid": + p.signedTID = val + case "skt": + p.signedStart, _ = time.Parse(TimeFormat, val) + case "ske": + p.signedExpiry, _ = time.Parse(TimeFormat, val) + case "sks": + p.signedService = val + case "skv": + p.signedVersion = val + case "sdd": + p.signedDirectoryDepth = val + case "saoid": + p.authorizedObjectID = val + case "suoid": + p.unauthorizedObjectID = val + case "scid": + p.correlationID = val + case "ses": + p.encryptionScope = val + default: + isSASKey = false // We didn't recognize the query parameter + } + if isSASKey && deleteSASParametersFromValues { + delete(values, k) + } + } + return p +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go new file mode 100644 index 0000000000..813fa77a9e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go @@ -0,0 +1,472 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package sas + +import ( + "bytes" + "errors" + "fmt" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" +) + +// BlobSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob. 
+// For more information on creating service sas, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas +// For more information on creating user delegation sas, see https://docs.microsoft.com/rest/api/storageservices/create-user-delegation-sas +type BlobSignatureValues struct { + Version string `param:"sv"` // If not specified, this defaults to Version + Protocol Protocol `param:"spr"` // See the Protocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + SnapshotTime time.Time + Permissions string `param:"sp"` // Create by initializing ContainerPermissions or BlobPermissions and then call String() + IPRange IPRange `param:"sip"` + Identifier string `param:"si"` + ContainerName string + BlobName string // Use "" to create a Container SAS + Directory string // Not nil for a directory SAS (ie sr=d) + CacheControl string // rscc + ContentDisposition string // rscd + ContentEncoding string // rsce + ContentLanguage string // rscl + ContentType string // rsct + BlobVersion string // sr=bv + AuthorizedObjectID string // saoid + UnauthorizedObjectID string // suoid + CorrelationID string // scid + EncryptionScope string `param:"ses"` +} + +func getDirectoryDepth(path string) string { + if path == "" { + return "" + } + return fmt.Sprint(strings.Count(path, "/") + 1) +} + +// SignWithSharedKey uses an account's SharedKeyCredential to sign this signature values to produce the proper SAS query parameters. +func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) { + if v.Identifier == "" && (v.ExpiryTime.IsZero() || v.Permissions == "") { + return QueryParameters{}, errors.New("service SAS is missing at least one of these: ExpiryTime or Permissions") + } + + // Parse the resource + resource := "c" + if !v.SnapshotTime.IsZero() { + resource = "bs" + } else if v.BlobVersion != "" { + resource = "bv" + } else if v.Directory != "" { + resource = "d" + v.BlobName = "" + } else if v.BlobName == "" { + // do nothing + } else { + resource = "b" + } + + // make sure the permission characters are in the correct order + if resource == "c" { + perms, err := parseContainerPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } else { + perms, err := parseBlobPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } + + if v.Version == "" { + v.Version = Version + } + startTime, expiryTime, snapshotTime := formatTimesForSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime) + + signedIdentifier := v.Identifier + + // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + stringToSign := strings.Join([]string{ + v.Permissions, + startTime, + expiryTime, + getCanonicalName(sharedKeyCredential.AccountName(), v.ContainerName, v.BlobName, v.Directory), + signedIdentifier, + v.IPRange.String(), + string(v.Protocol), + v.Version, + resource, + snapshotTime, // signed timestamp + v.EncryptionScope, + v.CacheControl, // rscc + v.ContentDisposition, // rscd + v.ContentEncoding, // rsce + v.ContentLanguage, // rscl + v.ContentType}, // rsct + "\n") + + signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign) + if err != nil { + return QueryParameters{}, err + } + + p := QueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + 
expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + encryptionScope: v.EncryptionScope, + + // Container/Blob-specific SAS parameters + resource: resource, + identifier: v.Identifier, + cacheControl: v.CacheControl, + contentDisposition: v.ContentDisposition, + contentEncoding: v.ContentEncoding, + contentLanguage: v.ContentLanguage, + contentType: v.ContentType, + snapshotTime: v.SnapshotTime, + signedDirectoryDepth: getDirectoryDepth(v.Directory), + authorizedObjectID: v.AuthorizedObjectID, + unauthorizedObjectID: v.UnauthorizedObjectID, + correlationID: v.CorrelationID, + // Calculated SAS signature + signature: signature, + } + + return p, nil +} + +// SignWithUserDelegation uses an account's UserDelegationCredential to sign this signature values to produce the proper SAS query parameters. +func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *UserDelegationCredential) (QueryParameters, error) { + if userDelegationCredential == nil { + return QueryParameters{}, fmt.Errorf("cannot sign SAS query without User Delegation Key") + } + + if v.ExpiryTime.IsZero() || v.Permissions == "" { + return QueryParameters{}, errors.New("user delegation SAS is missing at least one of these: ExpiryTime or Permissions") + } + + // Parse the resource + resource := "c" + if !v.SnapshotTime.IsZero() { + resource = "bs" + } else if v.BlobVersion != "" { + resource = "bv" + } else if v.Directory != "" { + resource = "d" + v.BlobName = "" + } else if v.BlobName == "" { + // do nothing + } else { + resource = "b" + } + // make sure the permission characters are in the correct order + if resource == "c" { + perms, err := parseContainerPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } else { + perms, err := parseBlobPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } + + if v.Version == "" { + v.Version = Version + } + startTime, expiryTime, snapshotTime := formatTimesForSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime) + + udk := exported.GetUDKParams(userDelegationCredential) + + udkStart, udkExpiry, _ := formatTimesForSigning(*udk.SignedStart, *udk.SignedExpiry, time.Time{}) + + stringToSign := strings.Join([]string{ + v.Permissions, + startTime, + expiryTime, + getCanonicalName(exported.GetAccountName(userDelegationCredential), v.ContainerName, v.BlobName, v.Directory), + *udk.SignedOID, + *udk.SignedTID, + udkStart, + udkExpiry, + *udk.SignedService, + *udk.SignedVersion, + v.AuthorizedObjectID, + v.UnauthorizedObjectID, + v.CorrelationID, + v.IPRange.String(), + string(v.Protocol), + v.Version, + resource, + snapshotTime, // signed timestamp + v.EncryptionScope, + v.CacheControl, // rscc + v.ContentDisposition, // rscd + v.ContentEncoding, // rsce + v.ContentLanguage, // rscl + v.ContentType}, // rsct + "\n") + + signature, err := exported.ComputeUDCHMACSHA256(userDelegationCredential, stringToSign) + if err != nil { + return QueryParameters{}, err + } + + p := QueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + encryptionScope: v.EncryptionScope, + + // Container/Blob-specific SAS parameters + resource: resource, + identifier: v.Identifier, + cacheControl: v.CacheControl, + contentDisposition: v.ContentDisposition, + contentEncoding: v.ContentEncoding, + contentLanguage: 
v.ContentLanguage, + contentType: v.ContentType, + snapshotTime: v.SnapshotTime, + signedDirectoryDepth: getDirectoryDepth(v.Directory), + authorizedObjectID: v.AuthorizedObjectID, + unauthorizedObjectID: v.UnauthorizedObjectID, + correlationID: v.CorrelationID, + // Calculated SAS signature + signature: signature, + } + + // User delegation SAS specific parameters + p.signedOID = *udk.SignedOID + p.signedTID = *udk.SignedTID + p.signedStart = *udk.SignedStart + p.signedExpiry = *udk.SignedExpiry + p.signedService = *udk.SignedService + p.signedVersion = *udk.SignedVersion + + return p, nil +} + +// getCanonicalName computes the canonical name for a container or blob resource for SAS signing. +func getCanonicalName(account string, containerName string, blobName string, directoryName string) string { + // Container: "/blob/account/containername" + // Blob: "/blob/account/containername/blobname" + elements := []string{"/blob/", account, "/", containerName} + if blobName != "" { + elements = append(elements, "/", strings.ReplaceAll(blobName, "\\", "/")) + } else if directoryName != "" { + elements = append(elements, "/", directoryName) + } + return strings.Join(elements, "") +} + +// ContainerPermissions type simplifies creating the permissions string for an Azure Storage container SAS. +// Initialize an instance of this type and then call its String method to set BlobSignatureValues' Permissions field. +// All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob +type ContainerPermissions struct { + Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag, FilterByTags, Move, SetImmutabilityPolicy bool + Execute, ModifyOwnership, ModifyPermissions bool // Meant for hierarchical namespace accounts +} + +// String produces the SAS permissions string for an Azure Storage container. +// Call this method to set BlobSignatureValues' Permissions field. +func (p *ContainerPermissions) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.DeletePreviousVersion { + b.WriteRune('x') + } + if p.List { + b.WriteRune('l') + } + if p.Tag { + b.WriteRune('t') + } + if p.FilterByTags { + b.WriteRune('f') + } + if p.Move { + b.WriteRune('m') + } + if p.Execute { + b.WriteRune('e') + } + if p.ModifyOwnership { + b.WriteRune('o') + } + if p.ModifyPermissions { + b.WriteRune('p') + } + if p.SetImmutabilityPolicy { + b.WriteRune('i') + } + return b.String() +} + +// Parse initializes ContainerPermissions' fields from a string. 
+func parseContainerPermissions(s string) (ContainerPermissions, error) { + p := ContainerPermissions{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'x': + p.DeletePreviousVersion = true + case 'l': + p.List = true + case 't': + p.Tag = true + case 'f': + p.FilterByTags = true + case 'm': + p.Move = true + case 'e': + p.Execute = true + case 'o': + p.ModifyOwnership = true + case 'p': + p.ModifyPermissions = true + case 'i': + p.SetImmutabilityPolicy = true + default: + return ContainerPermissions{}, fmt.Errorf("invalid permission: '%v'", r) + } + } + return p, nil +} + +// BlobPermissions type simplifies creating the permissions string for an Azure Storage blob SAS. +// Initialize an instance of this type and then call its String method to set BlobSignatureValues' Permissions field. +type BlobPermissions struct { + Read, Add, Create, Write, Delete, DeletePreviousVersion, PermanentDelete, List, Tag, Move, Execute, Ownership, Permissions, SetImmutabilityPolicy bool +} + +// String produces the SAS permissions string for an Azure Storage blob. +// Call this method to set BlobSignatureValues' Permissions field. +func (p *BlobPermissions) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.DeletePreviousVersion { + b.WriteRune('x') + } + if p.PermanentDelete { + b.WriteRune('y') + } + if p.List { + b.WriteRune('l') + } + if p.Tag { + b.WriteRune('t') + } + if p.Move { + b.WriteRune('m') + } + if p.Execute { + b.WriteRune('e') + } + if p.Ownership { + b.WriteRune('o') + } + if p.Permissions { + b.WriteRune('p') + } + if p.SetImmutabilityPolicy { + b.WriteRune('i') + } + return b.String() +} + +// Parse initializes BlobPermissions' fields from a string. +func parseBlobPermissions(s string) (BlobPermissions, error) { + p := BlobPermissions{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'x': + p.DeletePreviousVersion = true + case 'y': + p.PermanentDelete = true + case 'l': + p.List = true + case 't': + p.Tag = true + case 'm': + p.Move = true + case 'e': + p.Execute = true + case 'o': + p.Ownership = true + case 'p': + p.Permissions = true + case 'i': + p.SetImmutabilityPolicy = true + default: + return BlobPermissions{}, fmt.Errorf("invalid permission: '%v'", r) + } + } + return p, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go new file mode 100644 index 0000000000..758739cb82 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go @@ -0,0 +1,166 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
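// --- Illustrative sketch (editorial note, not part of the vendored sas package diffs above) ---
// The sas package above builds a service SAS in three steps: render a permissions struct
// to its canonical string, fill BlobSignatureValues, and sign with a shared key credential
// to get QueryParameters, whose Encode() emits the sorted, URL-encoded token. The
// credential constructor azblob.NewSharedKeyCredential is assumed from the public azblob
// package (it is not shown in this diff); account name, key, and blob names are placeholders.
package main

import (
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)

func main() {
	// Placeholder account and base64-encoded key; assumed public constructor.
	cred, err := azblob.NewSharedKeyCredential("myaccount", "bXktYWNjb3VudC1rZXk=")
	if err != nil {
		panic(err)
	}

	// String() re-orders and validates the permission characters via parseBlobPermissions.
	perms := sas.BlobPermissions{Read: true, Add: true, Create: true, Write: true, Delete: true}

	qp, err := sas.BlobSignatureValues{
		Protocol:      sas.ProtocolHTTPS,
		ExpiryTime:    time.Now().Add(2 * time.Hour),
		Permissions:   perms.String(),
		ContainerName: "mycontainer",
		BlobName:      "myblob.txt", // leave empty for a container-level SAS
	}.SignWithSharedKey(cred)
	if err != nil {
		panic(err)
	}

	// Encode() sorts and URL-encodes the recognized SAS query parameters.
	fmt.Println("https://myaccount.blob.core.windows.net/mycontainer/myblob.txt?" + qp.Encode())
}
// --- end of editorial sketch ---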
+ +package sas + +import ( + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +const ( + snapshot = "snapshot" + versionId = "versionid" +) + +// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator. +// Ex: "https://10.132.141.33/accountname/containername" +type IPEndpointStyleInfo struct { + AccountName string // "" if not using IP endpoint style +} + +// URLParts object represents the components that make up an Azure Storage Container/Blob URL. +// NOTE: Changing any SAS-related field requires computing a new SAS signature. +type URLParts struct { + Scheme string // Ex: "https://" + Host string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80" + IPEndpointStyleInfo IPEndpointStyleInfo + ContainerName string // "" if no container + BlobName string // "" if no blob + Snapshot string // "" if not a snapshot + SAS QueryParameters + UnparsedParams string + VersionID string // "" if not versioning enabled +} + +// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. +// Any other query parameters remain in the UnparsedParams field. +func ParseURL(u string) (URLParts, error) { + uri, err := url.Parse(u) + if err != nil { + return URLParts{}, err + } + + up := URLParts{ + Scheme: uri.Scheme, + Host: uri.Host, + } + + // Find the container & blob names (if any) + if uri.Path != "" { + path := uri.Path + if path[0] == '/' { + path = path[1:] // If path starts with a slash, remove it + } + if shared.IsIPEndpointStyle(up.Host) { + if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob + up.IPEndpointStyleInfo.AccountName = path + path = "" // No ContainerName present in the URL so path should be empty + } else { + up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes + path = path[accountEndIndex+1:] // path refers to portion after the account name now (container & blob names) + } + } + + containerEndIndex := strings.Index(path, "/") // Find the next slash (if it exists) + if containerEndIndex == -1 { // Slash not found; path has container name & no blob name + up.ContainerName = path + } else { + up.ContainerName = path[:containerEndIndex] // The container name is the part between the slashes + up.BlobName = path[containerEndIndex+1:] // The blob name is after the container slash + } + } + + // Convert the query parameters to a case-sensitive map & trim whitespace + paramsMap := uri.Query() + + up.Snapshot = "" // Assume no snapshot + if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok { + up.Snapshot = snapshotStr[0] + // If we recognized the query parameter, remove it from the map + delete(paramsMap, snapshot) + } + + up.VersionID = "" // Assume no versionID + if versionIDs, ok := caseInsensitiveValues(paramsMap).Get(versionId); ok { + up.VersionID = versionIDs[0] + // If we recognized the query parameter, remove it from the map + delete(paramsMap, versionId) // delete "versionid" from paramsMap + delete(paramsMap, "versionId") // delete "versionId" from paramsMap + } + + up.SAS = NewQueryParameters(paramsMap, true) + up.UnparsedParams = paramsMap.Encode() + return up, nil +} + +// String returns a URL object whose fields are initialized from the URLParts fields. 
The URL's RawQuery +// field contains the SAS, snapshot, and unparsed query parameters. +func (up URLParts) String() string { + path := "" + if shared.IsIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" { + path += "/" + up.IPEndpointStyleInfo.AccountName + } + // Concatenate container & blob names (if they exist) + if up.ContainerName != "" { + path += "/" + up.ContainerName + if up.BlobName != "" { + path += "/" + up.BlobName + } + } + + rawQuery := up.UnparsedParams + + // If no snapshot is initially provided, fill it in from the SAS query properties to help the user + if up.Snapshot == "" && !up.SAS.SnapshotTime().IsZero() { + up.Snapshot = up.SAS.SnapshotTime().Format(exported.SnapshotTimeFormat) + } + + // Concatenate blob version id query parameter (if it exists) + if up.VersionID != "" { + if len(rawQuery) > 0 { + rawQuery += "&" + } + rawQuery += versionId + "=" + up.VersionID + } + + // Concatenate blob snapshot query parameter (if it exists) + if up.Snapshot != "" { + if len(rawQuery) > 0 { + rawQuery += "&" + } + rawQuery += snapshot + "=" + up.Snapshot + } + sas := up.SAS.Encode() + if sas != "" { + if len(rawQuery) > 0 { + rawQuery += "&" + } + rawQuery += sas + } + u := url.URL{ + Scheme: up.Scheme, + Host: up.Host, + Path: path, + RawQuery: rawQuery, + } + return u.String() +} + +type caseInsensitiveValues url.Values // map[string][]string + +func (values caseInsensitiveValues) Get(key string) ([]string, bool) { + key = strings.ToLower(key) + for k, v := range values { + if strings.ToLower(k) == key { + return v, true + } + } + return []string{}, false +} diff --git a/vendor/github.com/moby/buildkit/api/services/control/control.pb.go b/vendor/github.com/moby/buildkit/api/services/control/control.pb.go index bf18f8f5cc..a180b3ae52 100644 --- a/vendor/github.com/moby/buildkit/api/services/control/control.pb.go +++ b/vendor/github.com/moby/buildkit/api/services/control/control.pb.go @@ -1417,9 +1417,11 @@ type BuildHistoryRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ActiveOnly bool `protobuf:"varint,1,opt,name=ActiveOnly,proto3" json:"ActiveOnly,omitempty"` - Ref string `protobuf:"bytes,2,opt,name=Ref,proto3" json:"Ref,omitempty"` - EarlyExit bool `protobuf:"varint,3,opt,name=EarlyExit,proto3" json:"EarlyExit,omitempty"` + ActiveOnly bool `protobuf:"varint,1,opt,name=ActiveOnly,proto3" json:"ActiveOnly,omitempty"` + Ref string `protobuf:"bytes,2,opt,name=Ref,proto3" json:"Ref,omitempty"` + EarlyExit bool `protobuf:"varint,3,opt,name=EarlyExit,proto3" json:"EarlyExit,omitempty"` + Filter []string `protobuf:"bytes,4,rep,name=Filter,proto3" json:"Filter,omitempty"` + Limit int32 `protobuf:"varint,5,opt,name=Limit,proto3" json:"Limit,omitempty"` } func (x *BuildHistoryRequest) Reset() { @@ -1473,6 +1475,20 @@ func (x *BuildHistoryRequest) GetEarlyExit() bool { return false } +func (x *BuildHistoryRequest) GetFilter() []string { + if x != nil { + return x.Filter + } + return nil +} + +func (x *BuildHistoryRequest) GetLimit() int32 { + if x != nil { + return x.Limit + } + return 0 +} + type BuildHistoryEvent struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2274,211 +2290,214 @@ var file_github_com_moby_buildkit_api_services_control_control_proto_rawDesc = [ 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 
0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x22, 0x65, 0x0a, 0x13, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x41, 0x63, 0x74, 0x69, 0x76, - 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x41, 0x63, 0x74, - 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x52, 0x65, 0x66, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x1c, 0x0a, 0x09, 0x45, 0x61, 0x72, - 0x6c, 0x79, 0x45, 0x78, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x45, 0x61, - 0x72, 0x6c, 0x79, 0x45, 0x78, 0x69, 0x74, 0x22, 0x8e, 0x01, 0x0a, 0x11, 0x42, 0x75, 0x69, 0x6c, - 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3b, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x6d, 0x6f, - 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, - 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3c, 0x0a, 0x06, 0x72, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6d, 0x6f, 0x62, + 0x22, 0x93, 0x01, 0x0a, 0x13, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x41, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x41, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x52, 0x65, 0x66, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x1c, 0x0a, 0x09, 0x45, 0x61, + 0x72, 0x6c, 0x79, 0x45, 0x78, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x45, + 0x61, 0x72, 0x6c, 0x79, 0x45, 0x78, 0x69, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x14, 0x0a, 0x05, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x05, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x8e, 0x01, 0x0a, 0x11, 0x42, 0x75, 0x69, 0x6c, 0x64, + 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, - 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, - 0x52, 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x22, 0xd3, 0x09, 0x0a, 0x12, 0x42, 0x75, 0x69, - 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, - 0x10, 0x0a, 0x03, 0x52, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, - 0x66, 0x12, 0x1a, 0x0a, 0x08, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x12, 0x5d, 0x0a, - 0x0d, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x46, 0x72, 
0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x46, - 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x73, 0x12, 0x38, 0x0a, 0x09, - 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, - 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x52, 0x09, 0x45, 0x78, 0x70, - 0x6f, 0x72, 0x74, 0x65, 0x72, 0x73, 0x12, 0x28, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, - 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x12, 0x38, 0x0a, 0x09, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x09, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3c, 0x0a, 0x0b, 0x43, 0x6f, - 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x43, 0x6f, 0x6d, - 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x30, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x66, 0x0a, 0x10, 0x45, 0x78, - 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x09, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, - 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x10, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, - 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x4b, 0x0a, - 0x07, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, + 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3c, 0x0a, 0x06, 0x72, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6d, 0x6f, 0x62, 0x79, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, + 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, + 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x22, 0xd3, 0x09, 0x0a, 0x12, 0x42, 0x75, 0x69, 0x6c, + 0x64, 
0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x10, + 0x0a, 0x03, 0x52, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, + 0x12, 0x1a, 0x0a, 0x08, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x12, 0x5d, 0x0a, 0x0d, + 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, + 0x6f, 0x72, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x46, 0x72, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x45, + 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, - 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x07, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x05, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x6f, 0x62, 0x79, - 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x05, 0x74, 0x72, 0x61, 0x63, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x70, 0x69, 0x6e, 0x6e, 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, - 0x70, 0x69, 0x6e, 0x6e, 0x65, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x6e, 0x75, 0x6d, 0x43, 0x61, 0x63, - 0x68, 0x65, 0x64, 0x53, 0x74, 0x65, 0x70, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, - 0x6e, 0x75, 0x6d, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, 0x53, 0x74, 0x65, 0x70, 0x73, 0x12, 0x24, - 0x0a, 0x0d, 0x6e, 0x75, 0x6d, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x74, 0x65, 0x70, 0x73, 0x18, - 0x10, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6e, 0x75, 0x6d, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, - 0x74, 0x65, 0x70, 0x73, 0x12, 0x2c, 0x0a, 0x11, 0x6e, 0x75, 0x6d, 0x43, 0x6f, 0x6d, 0x70, 0x6c, - 0x65, 0x74, 0x65, 0x64, 0x53, 0x74, 0x65, 0x70, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x11, 0x6e, 0x75, 0x6d, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x53, 0x74, 0x65, - 0x70, 0x73, 0x12, 0x42, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x45, 0x72, - 0x72, 0x6f, 0x72, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x6f, 0x62, 0x79, - 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x6e, 0x75, 0x6d, 0x57, 0x61, 0x72, - 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6e, 0x75, 0x6d, - 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x1a, 0x40, 0x0a, 0x12, 0x46, 0x72, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x64, 0x41, 
0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x45, 0x78, - 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, - 0x5d, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x37, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x21, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, - 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x49, - 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x79, - 0x0a, 0x19, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x52, - 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x16, 0x0a, - 0x06, 0x50, 0x69, 0x6e, 0x6e, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x50, - 0x69, 0x6e, 0x6e, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1a, 0x0a, - 0x08, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xe8, 0x01, 0x0a, 0x0a, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, - 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, - 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, - 0x65, 0x12, 0x4f, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 
0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0xc1, 0x02, 0x0a, 0x0f, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x48, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, - 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x10, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x12, 0x40, 0x0a, 0x0c, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x52, 0x0c, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x48, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x07, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, 0x58, 0x0a, 0x0c, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x32, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x52, 0x09, 0x45, 0x78, 0x70, 0x6f, + 0x72, 0x74, 0x65, 0x72, 0x73, 0x12, 0x28, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, + 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, + 0x38, 0x0a, 0x09, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3c, 0x0a, 0x0b, 0x43, 0x6f, 0x6d, + 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x43, 0x6f, 0x6d, 0x70, + 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x30, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x66, 0x0a, 0x10, 0x45, 0x78, 0x70, + 0x6f, 0x72, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x09, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 
0x69, 0x6c, 0x64, + 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, + 0x6f, 0x72, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x10, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x39, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, + 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x4b, 0x0a, 0x07, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, - 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x95, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x6f, 0x72, - 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x41, 0x74, 0x74, 0x72, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, - 0x65, 0x72, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x41, - 0x74, 0x74, 0x72, 0x73, 0x1a, 0x38, 0x0a, 0x0a, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, + 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x07, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x05, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x05, 0x74, 0x72, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x70, 0x69, 0x6e, 0x6e, 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, + 0x69, 0x6e, 0x6e, 0x65, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x6e, 0x75, 0x6d, 0x43, 0x61, 0x63, 0x68, + 0x65, 0x64, 0x53, 0x74, 0x65, 0x70, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x6e, + 0x75, 0x6d, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, 0x53, 0x74, 0x65, 0x70, 0x73, 0x12, 0x24, 0x0a, + 0x0d, 0x6e, 0x75, 0x6d, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x74, 0x65, 0x70, 0x73, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6e, 0x75, 0x6d, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x74, + 0x65, 0x70, 0x73, 0x12, 0x2c, 0x0a, 0x11, 0x6e, 0x75, 0x6d, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x64, 0x53, 0x74, 0x65, 0x70, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, + 0x6e, 0x75, 0x6d, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x53, 0x74, 0x65, 0x70, + 0x73, 0x12, 0x42, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x45, 0x72, 0x72, + 0x6f, 
0x72, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x6e, 0x75, 0x6d, 0x57, 0x61, 0x72, 0x6e, + 0x69, 0x6e, 0x67, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x57, + 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x1a, 0x40, 0x0a, 0x12, 0x46, 0x72, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x45, 0x78, 0x70, + 0x6f, 0x72, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x3f, - 0x0a, 0x15, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x54, 0x41, 0x52, 0x54, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, - 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, 0x32, - 0x89, 0x06, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x54, 0x0a, 0x09, 0x44, - 0x69, 0x73, 0x6b, 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x22, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x6b, - 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x5d, + 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x37, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x21, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, + 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x79, 0x0a, + 0x19, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, + 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x52, 0x65, + 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x16, 0x0a, 0x06, + 0x50, 0x69, 0x6e, 0x6e, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x50, 0x69, + 0x6e, 0x6e, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, + 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, + 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 
0x7a, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xe8, 0x01, 0x0a, 0x0a, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, + 0x12, 0x4f, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0xc1, 0x02, 0x0a, 0x0f, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x48, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x44, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, + 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x10, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x40, 0x0a, 0x0c, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x52, 0x0c, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x48, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, + 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x07, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, 0x58, 0x0a, 0x0c, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x32, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, - 0x44, 0x69, 0x73, 0x6b, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x48, 0x0a, 0x05, 0x50, 0x72, 0x75, 0x6e, 0x65, 0x12, 0x1e, 0x2e, 0x6d, 0x6f, 0x62, - 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, - 0x75, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6d, 0x6f, 0x62, - 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x73, - 0x61, 0x67, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x30, 0x01, 0x12, 0x48, 0x0a, 0x05, 0x53, - 0x6f, 0x6c, 0x76, 0x65, 0x12, 0x1e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6f, 0x6c, 0x76, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6f, 0x6c, 0x76, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x1f, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x20, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, - 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x30, 0x01, 0x12, 0x4d, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x1e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, - 0x76, 0x31, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, - 0x1e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, - 0x76, 0x31, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x28, - 0x01, 0x30, 0x01, 0x12, 0x5a, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, - 0x72, 0x73, 0x12, 0x24, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, - 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x45, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x95, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, + 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x41, 0x74, 0x74, 0x72, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x72, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x41, 0x74, + 0x74, 0x72, 0x73, 0x1a, 0x38, 0x0a, 0x0a, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 
0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x3f, 0x0a, + 0x15, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, + 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, 0x32, 0x89, + 0x06, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x54, 0x0a, 0x09, 0x44, 0x69, + 0x73, 0x6b, 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x22, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x55, + 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x6f, + 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, + 0x69, 0x73, 0x6b, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x48, 0x0a, 0x05, 0x50, 0x72, 0x75, 0x6e, 0x65, 0x12, 0x1e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x75, + 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6d, 0x6f, 0x62, 0x79, + 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x73, 0x61, + 0x67, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x30, 0x01, 0x12, 0x48, 0x0a, 0x05, 0x53, 0x6f, + 0x6c, 0x76, 0x65, 0x12, 0x1e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6f, 0x6c, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6f, 0x6c, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, + 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x20, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x30, 0x01, 0x12, 0x4d, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1e, + 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, + 0x31, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1e, + 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, + 0x31, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x28, 0x01, + 0x30, 0x01, 0x12, 0x5a, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, + 0x73, 0x12, 0x24, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, + 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, + 0x75, 0x69, 
0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x57, + 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, + 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x62, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x25, 0x2e, 0x6d, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x62, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x42, + 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x25, 0x2e, 0x6d, 0x6f, + 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, + 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, + 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, + 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x30, 0x01, 0x12, 0x6f, 0x0a, 0x12, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, + 0x2b, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, + 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, + 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, - 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x30, 0x01, 0x12, 0x6f, 0x0a, 0x12, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x12, 0x2b, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, - 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, - 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, - 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x40, 0x5a, 0x3e, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, 0x2f, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x3b, 0x6d, 0x6f, 0x62, - 0x79, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, + 0x72, 
0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, 0x2f, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x3b, 0x6d, 0x6f, 0x62, 0x79, + 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/moby/buildkit/api/services/control/control.proto b/vendor/github.com/moby/buildkit/api/services/control/control.proto index 5ea908b917..85da9a72a7 100644 --- a/vendor/github.com/moby/buildkit/api/services/control/control.proto +++ b/vendor/github.com/moby/buildkit/api/services/control/control.proto @@ -180,6 +180,8 @@ message BuildHistoryRequest { bool ActiveOnly = 1; string Ref = 2; bool EarlyExit = 3; + repeated string Filter = 4; + int32 Limit = 5; } enum BuildHistoryEventType { diff --git a/vendor/github.com/moby/buildkit/api/services/control/control_vtproto.pb.go b/vendor/github.com/moby/buildkit/api/services/control/control_vtproto.pb.go index c53841143e..e4daa74ca5 100644 --- a/vendor/github.com/moby/buildkit/api/services/control/control_vtproto.pb.go +++ b/vendor/github.com/moby/buildkit/api/services/control/control_vtproto.pb.go @@ -558,6 +558,12 @@ func (m *BuildHistoryRequest) CloneVT() *BuildHistoryRequest { r.ActiveOnly = m.ActiveOnly r.Ref = m.Ref r.EarlyExit = m.EarlyExit + r.Limit = m.Limit + if rhs := m.Filter; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Filter = tmpContainer + } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -1569,6 +1575,18 @@ func (this *BuildHistoryRequest) EqualVT(that *BuildHistoryRequest) bool { if this.EarlyExit != that.EarlyExit { return false } + if len(this.Filter) != len(that.Filter) { + return false + } + for i, vx := range this.Filter { + vy := that.Filter[i] + if vx != vy { + return false + } + } + if this.Limit != that.Limit { + return false + } return string(this.unknownFields) == string(that.unknownFields) } @@ -3272,6 +3290,20 @@ func (m *BuildHistoryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.Limit != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x28 + } + if len(m.Filter) > 0 { + for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Filter[iNdEx]) + copy(dAtA[i:], m.Filter[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Filter[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } if m.EarlyExit { i-- if m.EarlyExit { @@ -4465,6 +4497,15 @@ func (m *BuildHistoryRequest) SizeVT() (n int) { if m.EarlyExit { n += 2 } + if len(m.Filter) > 0 { + for _, s := range m.Filter { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Limit != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Limit)) + } n += len(m.unknownFields) return n } @@ -8694,6 +8735,57 @@ func (m *BuildHistoryRequest) UnmarshalVT(dAtA []byte) error { } } m.EarlyExit = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/vendor/github.com/moby/buildkit/api/types/worker.pb.go b/vendor/github.com/moby/buildkit/api/types/worker.pb.go index c4065f6391..b49b47d322 100644 --- a/vendor/github.com/moby/buildkit/api/types/worker.pb.go +++ b/vendor/github.com/moby/buildkit/api/types/worker.pb.go @@ -31,6 +31,7 @@ type WorkerRecord struct { Platforms []*pb.Platform `protobuf:"bytes,3,rep,name=platforms,proto3" json:"platforms,omitempty"` GCPolicy []*GCPolicy `protobuf:"bytes,4,rep,name=GCPolicy,proto3" json:"GCPolicy,omitempty"` BuildkitVersion *BuildkitVersion `protobuf:"bytes,5,opt,name=BuildkitVersion,proto3" json:"BuildkitVersion,omitempty"` + CDIDevices []*CDIDevice `protobuf:"bytes,6,rep,name=CDIDevices,proto3" json:"CDIDevices,omitempty"` } func (x *WorkerRecord) Reset() { @@ -98,6 +99,13 @@ func (x *WorkerRecord) GetBuildkitVersion() *BuildkitVersion { return nil } +func (x *WorkerRecord) GetCDIDevices() []*CDIDevice { + if x != nil { + return x.CDIDevices + } + return nil +} + type GCPolicy struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -245,6 +253,75 @@ func (x *BuildkitVersion) GetRevision() string { return "" } +type CDIDevice struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + AutoAllow bool `protobuf:"varint,2,opt,name=AutoAllow,proto3" json:"AutoAllow,omitempty"` + Annotations map[string]string `protobuf:"bytes,3,rep,name=Annotations,proto3" json:"Annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + OnDemand bool `protobuf:"varint,4,opt,name=OnDemand,proto3" json:"OnDemand,omitempty"` +} + +func (x *CDIDevice) Reset() { + *x = CDIDevice{} + mi := &file_github_com_moby_buildkit_api_types_worker_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CDIDevice) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CDIDevice) ProtoMessage() {} + +func (x *CDIDevice) ProtoReflect() protoreflect.Message { + mi := &file_github_com_moby_buildkit_api_types_worker_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CDIDevice.ProtoReflect.Descriptor instead. 
+func (*CDIDevice) Descriptor() ([]byte, []int) { + return file_github_com_moby_buildkit_api_types_worker_proto_rawDescGZIP(), []int{3} +} + +func (x *CDIDevice) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CDIDevice) GetAutoAllow() bool { + if x != nil { + return x.AutoAllow + } + return false +} + +func (x *CDIDevice) GetAnnotations() map[string]string { + if x != nil { + return x.Annotations + } + return nil +} + +func (x *CDIDevice) GetOnDemand() bool { + if x != nil { + return x.OnDemand + } + return false +} + var File_github_com_moby_buildkit_api_types_worker_proto protoreflect.FileDescriptor var file_github_com_moby_buildkit_api_types_worker_proto_rawDesc = []byte{ @@ -255,7 +332,7 @@ var file_github_com_moby_buildkit_api_types_worker_proto_rawDesc = []byte{ 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x2f, 0x70, 0x62, 0x2f, 0x6f, 0x70, - 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe0, 0x02, 0x0a, 0x0c, 0x57, 0x6f, 0x72, 0x6b, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa3, 0x03, 0x0a, 0x0c, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x48, 0x0a, 0x06, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, @@ -273,34 +350,54 @@ var file_github_com_moby_buildkit_api_types_worker_proto_rawDesc = []byte{ 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, - 0x42, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, - 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc8, 0x01, 0x0a, 0x08, 0x47, - 0x43, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x12, 0x22, 0x0a, 0x0c, 0x6b, 0x65, 0x65, - 0x70, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0c, 0x6b, 0x65, 0x65, 0x70, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, - 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, 0x12, 0x22, 0x0a, - 0x0c, 0x6d, 0x61, 0x78, 0x55, 0x73, 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x55, 0x73, 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6d, 0x69, 0x6e, 0x46, 0x72, 0x65, 0x65, 0x53, 0x70, 0x61, 0x63, - 0x65, 0x18, 
0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6d, 0x69, 0x6e, 0x46, 0x72, 0x65, 0x65, - 0x53, 0x70, 0x61, 0x63, 0x65, 0x22, 0x61, 0x0a, 0x0f, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, - 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, - 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, - 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x6d, - 0x6f, 0x62, 0x79, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x5f, 0x76, 0x31, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x42, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x41, 0x0a, 0x0a, 0x43, 0x44, 0x49, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x43, 0x44, 0x49, + 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x0a, 0x43, 0x44, 0x49, 0x44, 0x65, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc8, 0x01, + 0x0a, 0x08, 0x47, 0x43, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x12, 0x22, 0x0a, 0x0c, + 0x6b, 0x65, 0x65, 0x70, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0c, 0x6b, 0x65, 0x65, 0x70, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x22, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x55, 0x73, 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x55, 0x73, 0x65, 0x64, 0x53, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6d, 0x69, 0x6e, 0x46, 0x72, 0x65, 0x65, 0x53, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6d, 0x69, 0x6e, 0x46, + 0x72, 0x65, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x22, 0x61, 0x0a, 0x0f, 0x42, 0x75, 0x69, 0x6c, + 0x64, 0x6b, 0x69, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x70, + 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, 0x07, 
0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xef, 0x01, 0x0a, 0x09, + 0x43, 0x44, 0x49, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, + 0x09, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x12, 0x54, 0x0a, 0x0b, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x32, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, + 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x43, 0x44, 0x49, 0x44, 0x65, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x4f, 0x6e, 0x44, 0x65, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x4f, 0x6e, 0x44, 0x65, 0x6d, 0x61, 0x6e, 0x64, 0x1a, 0x3e, 0x0a, + 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x3b, 0x5a, + 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, + 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x3b, 0x6d, 0x6f, 0x62, 0x79, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, + 0x74, 0x5f, 0x76, 0x31, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -315,24 +412,28 @@ func file_github_com_moby_buildkit_api_types_worker_proto_rawDescGZIP() []byte { return file_github_com_moby_buildkit_api_types_worker_proto_rawDescData } -var file_github_com_moby_buildkit_api_types_worker_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_github_com_moby_buildkit_api_types_worker_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var file_github_com_moby_buildkit_api_types_worker_proto_goTypes = []any{ (*WorkerRecord)(nil), // 0: moby.buildkit.v1.types.WorkerRecord (*GCPolicy)(nil), // 1: moby.buildkit.v1.types.GCPolicy (*BuildkitVersion)(nil), // 2: moby.buildkit.v1.types.BuildkitVersion - nil, // 3: moby.buildkit.v1.types.WorkerRecord.LabelsEntry - (*pb.Platform)(nil), // 4: pb.Platform + (*CDIDevice)(nil), // 3: moby.buildkit.v1.types.CDIDevice + nil, // 4: moby.buildkit.v1.types.WorkerRecord.LabelsEntry + nil, // 5: moby.buildkit.v1.types.CDIDevice.AnnotationsEntry + (*pb.Platform)(nil), // 6: pb.Platform } var file_github_com_moby_buildkit_api_types_worker_proto_depIdxs = []int32{ - 3, // 0: moby.buildkit.v1.types.WorkerRecord.Labels:type_name -> moby.buildkit.v1.types.WorkerRecord.LabelsEntry - 4, // 1: moby.buildkit.v1.types.WorkerRecord.platforms:type_name -> pb.Platform + 4, // 0: moby.buildkit.v1.types.WorkerRecord.Labels:type_name -> 
moby.buildkit.v1.types.WorkerRecord.LabelsEntry
+ 6, // 1: moby.buildkit.v1.types.WorkerRecord.platforms:type_name -> pb.Platform
 1, // 2: moby.buildkit.v1.types.WorkerRecord.GCPolicy:type_name -> moby.buildkit.v1.types.GCPolicy
 2, // 3: moby.buildkit.v1.types.WorkerRecord.BuildkitVersion:type_name -> moby.buildkit.v1.types.BuildkitVersion
- 4, // [4:4] is the sub-list for method output_type
- 4, // [4:4] is the sub-list for method input_type
- 4, // [4:4] is the sub-list for extension type_name
- 4, // [4:4] is the sub-list for extension extendee
- 0, // [0:4] is the sub-list for field type_name
+ 3, // 4: moby.buildkit.v1.types.WorkerRecord.CDIDevices:type_name -> moby.buildkit.v1.types.CDIDevice
+ 5, // 5: moby.buildkit.v1.types.CDIDevice.Annotations:type_name -> moby.buildkit.v1.types.CDIDevice.AnnotationsEntry
+ 6, // [6:6] is the sub-list for method output_type
+ 6, // [6:6] is the sub-list for method input_type
+ 6, // [6:6] is the sub-list for extension type_name
+ 6, // [6:6] is the sub-list for extension extendee
+ 0, // [0:6] is the sub-list for field type_name
 }
 func init() { file_github_com_moby_buildkit_api_types_worker_proto_init() }
@@ -346,7 +447,7 @@ func file_github_com_moby_buildkit_api_types_worker_proto_init() {
 GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 RawDescriptor: file_github_com_moby_buildkit_api_types_worker_proto_rawDesc,
 NumEnums: 0,
- NumMessages: 4,
+ NumMessages: 6,
 NumExtensions: 0,
 NumServices: 0,
 },
diff --git a/vendor/github.com/moby/buildkit/api/types/worker.proto b/vendor/github.com/moby/buildkit/api/types/worker.proto
index 51a92d6b6c..8f56566467 100644
--- a/vendor/github.com/moby/buildkit/api/types/worker.proto
+++ b/vendor/github.com/moby/buildkit/api/types/worker.proto
@@ -12,6 +12,7 @@ message WorkerRecord {
 repeated pb.Platform platforms = 3;
 repeated GCPolicy GCPolicy = 4;
 BuildkitVersion BuildkitVersion = 5;
+ repeated CDIDevice CDIDevices = 6;
 }
 message GCPolicy {
@@ -30,3 +31,10 @@ message BuildkitVersion {
 string version = 2;
 string revision = 3;
 }
+
+message CDIDevice {
+ string Name = 1;
+ bool AutoAllow = 2;
+ map<string, string> Annotations = 3;
+ bool OnDemand = 4;
+}
\ No newline at end of file
diff --git a/vendor/github.com/moby/buildkit/api/types/worker_vtproto.pb.go b/vendor/github.com/moby/buildkit/api/types/worker_vtproto.pb.go
index 62ed2afe1e..6113a60484 100644
--- a/vendor/github.com/moby/buildkit/api/types/worker_vtproto.pb.go
+++ b/vendor/github.com/moby/buildkit/api/types/worker_vtproto.pb.go
@@ -48,6 +48,13 @@ func (m *WorkerRecord) CloneVT() *WorkerRecord {
 }
 r.GCPolicy = tmpContainer
 }
+ if rhs := m.CDIDevices; rhs != nil {
+ tmpContainer := make([]*CDIDevice, len(rhs))
+ for k, v := range rhs {
+ tmpContainer[k] = v.CloneVT()
+ }
+ r.CDIDevices = tmpContainer
+ }
 if len(m.unknownFields) > 0 {
 r.unknownFields = make([]byte, len(m.unknownFields))
 copy(r.unknownFields, m.unknownFields)
@@ -104,6 +111,32 @@ func (m *BuildkitVersion) CloneMessageVT() proto.Message {
 return m.CloneVT()
 }
+func (m *CDIDevice) CloneVT() *CDIDevice {
+ if m == nil {
+ return (*CDIDevice)(nil)
+ }
+ r := new(CDIDevice)
+ r.Name = m.Name
+ r.AutoAllow = m.AutoAllow
+ r.OnDemand = m.OnDemand
+ if rhs := m.Annotations; rhs != nil {
+ tmpContainer := make(map[string]string, len(rhs))
+ for k, v := range rhs {
+ tmpContainer[k] = v
+ }
+ r.Annotations = tmpContainer
+ }
+ if len(m.unknownFields) > 0 {
+ r.unknownFields = make([]byte, len(m.unknownFields))
+ copy(r.unknownFields, m.unknownFields)
+ }
+ return r
+}
+
+func (m *CDIDevice) CloneMessageVT() 
proto.Message { + return m.CloneVT() +} + func (this *WorkerRecord) EqualVT(that *WorkerRecord) bool { if this == that { return true @@ -162,6 +195,23 @@ func (this *WorkerRecord) EqualVT(that *WorkerRecord) bool { if !this.BuildkitVersion.EqualVT(that.BuildkitVersion) { return false } + if len(this.CDIDevices) != len(that.CDIDevices) { + return false + } + for i, vx := range this.CDIDevices { + vy := that.CDIDevices[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &CDIDevice{} + } + if q == nil { + q = &CDIDevice{} + } + if !p.EqualVT(q) { + return false + } + } + } return string(this.unknownFields) == string(that.unknownFields) } @@ -237,6 +287,43 @@ func (this *BuildkitVersion) EqualMessageVT(thatMsg proto.Message) bool { } return this.EqualVT(that) } +func (this *CDIDevice) EqualVT(that *CDIDevice) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Name != that.Name { + return false + } + if this.AutoAllow != that.AutoAllow { + return false + } + if len(this.Annotations) != len(that.Annotations) { + return false + } + for i, vx := range this.Annotations { + vy, ok := that.Annotations[i] + if !ok { + return false + } + if vx != vy { + return false + } + } + if this.OnDemand != that.OnDemand { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *CDIDevice) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*CDIDevice) + if !ok { + return false + } + return this.EqualVT(that) +} func (m *WorkerRecord) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -267,6 +354,18 @@ func (m *WorkerRecord) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.CDIDevices) > 0 { + for iNdEx := len(m.CDIDevices) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CDIDevices[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } if m.BuildkitVersion != nil { size, err := m.BuildkitVersion.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -456,6 +555,85 @@ func (m *BuildkitVersion) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *CDIDevice) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CDIDevice) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CDIDevice) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.OnDemand { + i-- + if m.OnDemand { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if m.AutoAllow { + i-- + if m.AutoAllow { + dAtA[i] = 1 + } else 
{ + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *WorkerRecord) SizeVT() (n int) { if m == nil { return 0 @@ -490,6 +668,12 @@ func (m *WorkerRecord) SizeVT() (n int) { l = m.BuildkitVersion.SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } + if len(m.CDIDevices) > 0 { + for _, e := range m.CDIDevices { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } n += len(m.unknownFields) return n } @@ -547,6 +731,34 @@ func (m *BuildkitVersion) SizeVT() (n int) { return n } +func (m *CDIDevice) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.AutoAllow { + n += 2 + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if m.OnDemand { + n += 2 + } + n += len(m.unknownFields) + return n +} + func (m *WorkerRecord) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -839,6 +1051,40 @@ func (m *WorkerRecord) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CDIDevices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CDIDevices = append(m.CDIDevices, &CDIDevice{}) + if err := m.CDIDevices[len(m.CDIDevices)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -1187,3 +1433,253 @@ func (m *BuildkitVersion) UnmarshalVT(dAtA []byte) error { } return nil } +func (m *CDIDevice) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CDIDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CDIDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoAllow", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AutoAllow = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong 
wireType = %d for field OnDemand", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.OnDemand = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go b/vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go index bbc92a217d..5c20695d12 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go @@ -7,6 +7,8 @@ import ( "fmt" "io" "os" + "strconv" + "strings" "sync" "time" @@ -39,6 +41,7 @@ const ( attrURL = "url" attrRepository = "repository" attrGHToken = "ghtoken" + attrAPIVersion = "version" version = "1" defaultTimeout = 10 * time.Minute @@ -50,6 +53,7 @@ type Config struct { Token string // token for the Github Cache runtime API GHToken string // token for the Github REST API Repository string + Version int Timeout time.Duration } @@ -66,6 +70,23 @@ func getConfig(attrs map[string]string) (*Config, error) { if !ok { return nil, errors.Errorf("token not set for github actions cache") } + var apiVersionInt int + apiVersion, ok := attrs[attrAPIVersion] + if ok { + i, err := strconv.ParseInt(apiVersion, 10, 64) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse api version %q, expected positive integer", apiVersion) + } + apiVersionInt = int(i) + } + // best effort on old clients + if apiVersionInt == 0 { + if strings.Contains(url, "results-receiver.actions.githubusercontent.com") { + apiVersionInt = 2 + } else { + apiVersionInt = 1 + } + } timeout := defaultTimeout if v, ok := attrs[attrTimeout]; ok { @@ -82,6 +103,7 @@ func getConfig(attrs map[string]string) (*Config, error) { Timeout: timeout, GHToken: attrs[attrGHToken], Repository: attrs[attrRepository], + Version: apiVersionInt, }, nil } @@ -107,7 +129,7 @@ type exporter struct { func NewExporter(c *Config) (remotecache.Exporter, error) { cc := v1.NewCacheChains() - cache, err := actionscache.New(c.Token, c.URL, actionscache.Opt{ + cache, err := actionscache.New(c.Token, c.URL, c.Version > 1, actionscache.Opt{ Client: tracing.DefaultClient, Timeout: c.Timeout, }) @@ -282,7 +304,7 @@ type importer struct { } func NewImporter(c *Config) (remotecache.Importer, error) { - cache, err := actionscache.New(c.Token, c.URL, actionscache.Opt{ + cache, err := actionscache.New(c.Token, c.URL, c.Version > 1, actionscache.Opt{ Client: tracing.DefaultClient, Timeout: c.Timeout, }) diff --git a/vendor/github.com/moby/buildkit/client/info.go b/vendor/github.com/moby/buildkit/client/info.go index d5bdbcec89..a637ffae16 100644 --- a/vendor/github.com/moby/buildkit/client/info.go +++ b/vendor/github.com/moby/buildkit/client/info.go @@ -18,6 +18,13 @@ type BuildkitVersion struct { Revision string `json:"revision"` } +type CDIDevice struct { + Name string `json:"name"` + AutoAllow bool `json:"autoAllow"` + Annotations map[string]string `json:"annotations"` + OnDemand bool 
`json:"onDemand"` +} + func (c *Client) Info(ctx context.Context) (*Info, error) { res, err := c.ControlClient().Info(ctx, &controlapi.InfoRequest{}) if err != nil { @@ -38,3 +45,16 @@ func fromAPIBuildkitVersion(in *apitypes.BuildkitVersion) BuildkitVersion { Revision: in.Revision, } } + +func fromAPICDIDevices(in []*apitypes.CDIDevice) []CDIDevice { + var out []CDIDevice + for _, d := range in { + out = append(out, CDIDevice{ + Name: d.Name, + AutoAllow: d.AutoAllow, + Annotations: d.Annotations, + OnDemand: d.OnDemand, + }) + } + return out +} diff --git a/vendor/github.com/moby/buildkit/client/llb/exec.go b/vendor/github.com/moby/buildkit/client/llb/exec.go index a10fb94a26..ba66e7332c 100644 --- a/vendor/github.com/moby/buildkit/client/llb/exec.go +++ b/vendor/github.com/moby/buildkit/client/llb/exec.go @@ -60,6 +60,7 @@ type ExecOp struct { isValidated bool secrets []SecretInfo ssh []SSHInfo + cdiDevices []CDIDeviceInfo } func (e *ExecOp) AddMount(target string, source Output, opt ...MountOption) Output { @@ -266,6 +267,7 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] Network: network, Security: security, } + if network != NetModeSandbox { addCap(&e.constraints, pb.CapExecMetaNetwork) } @@ -321,6 +323,18 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] addCap(&e.constraints, pb.CapExecMountSSH) } + if len(e.cdiDevices) > 0 { + addCap(&e.constraints, pb.CapExecMetaCDI) + cd := make([]*pb.CDIDevice, len(e.cdiDevices)) + for i, d := range e.cdiDevices { + cd[i] = &pb.CDIDevice{ + Name: d.Name, + Optional: d.Optional, + } + } + peo.CdiDevices = cd + } + if e.constraints.Platform == nil { p, err := getPlatform(e.base)(ctx, c) if err != nil { @@ -624,6 +638,41 @@ func AddUlimit(name UlimitName, soft int64, hard int64) RunOption { }) } +func AddCDIDevice(opts ...CDIDeviceOption) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + c := &CDIDeviceInfo{} + for _, opt := range opts { + opt.SetCDIDeviceOption(c) + } + ei.CDIDevices = append(ei.CDIDevices, *c) + }) +} + +type CDIDeviceOption interface { + SetCDIDeviceOption(*CDIDeviceInfo) +} + +type cdiDeviceOptionFunc func(*CDIDeviceInfo) + +func (fn cdiDeviceOptionFunc) SetCDIDeviceOption(ci *CDIDeviceInfo) { + fn(ci) +} + +func CDIDeviceName(name string) CDIDeviceOption { + return cdiDeviceOptionFunc(func(ci *CDIDeviceInfo) { + ci.Name = name + }) +} + +var CDIDeviceOptional = cdiDeviceOptionFunc(func(ci *CDIDeviceInfo) { + ci.Optional = true +}) + +type CDIDeviceInfo struct { + Name string + Optional bool +} + func ValidExitCodes(codes ...int) RunOption { return runOptionFunc(func(ei *ExecInfo) { ei.State = validExitCodes(codes...)(ei.State) @@ -815,6 +864,7 @@ type ExecInfo struct { ProxyEnv *ProxyEnv Secrets []SecretInfo SSH []SSHInfo + CDIDevices []CDIDeviceInfo } type MountInfo struct { diff --git a/vendor/github.com/moby/buildkit/client/llb/state.go b/vendor/github.com/moby/buildkit/client/llb/state.go index b52aaa2443..5743a31e38 100644 --- a/vendor/github.com/moby/buildkit/client/llb/state.go +++ b/vendor/github.com/moby/buildkit/client/llb/state.go @@ -295,6 +295,7 @@ func (s State) Run(ro ...RunOption) ExecState { } exec.secrets = ei.Secrets exec.ssh = ei.SSH + exec.cdiDevices = ei.CDIDevices return ExecState{ State: s.WithOutput(exec.Output()), diff --git a/vendor/github.com/moby/buildkit/client/workers.go b/vendor/github.com/moby/buildkit/client/workers.go index 10b0cbf859..a555817514 100644 --- a/vendor/github.com/moby/buildkit/client/workers.go +++ 
b/vendor/github.com/moby/buildkit/client/workers.go @@ -18,6 +18,7 @@ type WorkerInfo struct { Platforms []ocispecs.Platform `json:"platforms"` GCPolicy []PruneInfo `json:"gcPolicy"` BuildkitVersion BuildkitVersion `json:"buildkitVersion"` + CDIDevices []CDIDevice `json:"cdiDevices"` } // ListWorkers lists all active workers @@ -42,6 +43,7 @@ func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([] Platforms: pb.ToSpecPlatforms(w.Platforms), GCPolicy: fromAPIGCPolicy(w.GCPolicy), BuildkitVersion: fromAPIBuildkitVersion(w.BuildkitVersion), + CDIDevices: fromAPICDIDevices(w.CDIDevices), }) } diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go index 480f0450a4..381effcdc9 100644 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go +++ b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go @@ -23,6 +23,8 @@ type Config struct { OTEL OTELConfig `toml:"otel"` + CDI CDIConfig `toml:"cdi"` + Workers struct { OCI OCIConfig `toml:"oci"` Containerd ContainerdConfig `toml:"containerd"` @@ -74,6 +76,11 @@ type OTELConfig struct { SocketPath string `toml:"socketPath"` } +type CDIConfig struct { + Disabled *bool `toml:"disabled"` + SpecDirs []string `toml:"specDirs"` +} + type GCConfig struct { GC *bool `toml:"gc"` // Deprecated: use GCReservedSpace instead diff --git a/vendor/github.com/moby/buildkit/control/control.go b/vendor/github.com/moby/buildkit/control/control.go index 592775d9d0..ca50913190 100644 --- a/vendor/github.com/moby/buildkit/control/control.go +++ b/vendor/github.com/moby/buildkit/control/control.go @@ -33,6 +33,7 @@ import ( "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/bboltcachestorage" "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/llbsolver/cdidevices" "github.com/moby/buildkit/solver/llbsolver/proc" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/bklog" @@ -586,6 +587,7 @@ func (c *Controller) ListWorkers(ctx context.Context, r *controlapi.ListWorkersR Platforms: pb.PlatformsFromSpec(w.Platforms(true)), GCPolicy: toPBGCPolicy(w.GCPolicy()), BuildkitVersion: toPBBuildkitVersion(w.BuildkitVersion()), + CDIDevices: toPBCDIDevices(w.CDIManager()), }) } return resp, nil @@ -684,6 +686,23 @@ func toPBBuildkitVersion(in client.BuildkitVersion) *apitypes.BuildkitVersion { } } +func toPBCDIDevices(manager *cdidevices.Manager) []*apitypes.CDIDevice { + if manager == nil { + return nil + } + devs := manager.ListDevices() + out := make([]*apitypes.CDIDevice, 0, len(devs)) + for _, dev := range devs { + out = append(out, &apitypes.CDIDevice{ + Name: dev.Name, + AutoAllow: true, // TODO + Annotations: dev.Annotations, + OnDemand: dev.OnDemand, + }) + } + return out +} + func findDuplicateCacheOptions(cacheOpts []*controlapi.CacheOptionsEntry) ([]*controlapi.CacheOptionsEntry, error) { seen := map[string]*controlapi.CacheOptionsEntry{} duplicate := map[string]struct{}{} diff --git a/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go b/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go index 4022de0a6d..366959f014 100644 --- a/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go +++ b/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go @@ -22,6 +22,7 @@ import ( resourcestypes "github.com/moby/buildkit/executor/resources/types" gatewayapi "github.com/moby/buildkit/frontend/gateway/pb" 
"github.com/moby/buildkit/identity" + "github.com/moby/buildkit/solver/llbsolver/cdidevices" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/network" "github.com/pkg/errors" @@ -40,6 +41,7 @@ type containerdExecutor struct { traceSocket string rootless bool runtime *RuntimeInfo + cdiManager *cdidevices.Manager } // OnCreateRuntimer provides an alternative to OCI hooks for applying network @@ -72,6 +74,7 @@ type ExecutorOptions struct { TraceSocket string Rootless bool Runtime *RuntimeInfo + CDIManager *cdidevices.Manager } // New creates a new executor backed by connection to containerd API @@ -92,6 +95,7 @@ func New(executorOpts ExecutorOptions) executor.Executor { traceSocket: executorOpts.TraceSocket, rootless: executorOpts.Rootless, runtime: executorOpts.Runtime, + cdiManager: executorOpts.CDIManager, } } diff --git a/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor_unix.go b/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor_unix.go index 10b6fababe..2233909cdf 100644 --- a/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor_unix.go +++ b/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor_unix.go @@ -145,7 +145,7 @@ func (w *containerdExecutor) createOCISpec(ctx context.Context, id, resolvConf, } processMode := oci.ProcessSandbox // FIXME(AkihiroSuda) - spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, processMode, nil, w.apparmorProfile, w.selinux, w.traceSocket, opts...) + spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, processMode, nil, w.apparmorProfile, w.selinux, w.traceSocket, w.cdiManager, opts...) if err != nil { releaseAll() return nil, nil, err diff --git a/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor_windows.go b/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor_windows.go index 14f0cf444c..b09114deb4 100644 --- a/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor_windows.go +++ b/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor_windows.go @@ -88,7 +88,7 @@ func (w *containerdExecutor) createOCISpec(ctx context.Context, id, _, _ string, } processMode := oci.ProcessSandbox // FIXME(AkihiroSuda) - spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, "", "", namespace, "", processMode, nil, "", false, w.traceSocket, opts...) + spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, "", "", namespace, "", processMode, nil, "", false, w.traceSocket, nil, opts...) 
if err != nil { releaseAll() return nil, nil, err diff --git a/vendor/github.com/moby/buildkit/executor/executor.go b/vendor/github.com/moby/buildkit/executor/executor.go index 9902392084..d35e18af65 100644 --- a/vendor/github.com/moby/buildkit/executor/executor.go +++ b/vendor/github.com/moby/buildkit/executor/executor.go @@ -22,6 +22,7 @@ type Meta struct { ReadonlyRootFS bool ExtraHosts []HostIP Ulimit []*pb.Ulimit + CDIDevices []*pb.CDIDevice CgroupParent string NetMode pb.NetMode SecurityMode pb.SecurityMode diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec.go b/vendor/github.com/moby/buildkit/executor/oci/spec.go index fe0b03c4a1..a6034e6ed0 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/spec.go +++ b/vendor/github.com/moby/buildkit/executor/oci/spec.go @@ -17,6 +17,7 @@ import ( "github.com/mitchellh/hashstructure/v2" "github.com/moby/buildkit/executor" "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver/llbsolver/cdidevices" "github.com/moby/buildkit/util/network" rootlessmountopts "github.com/moby/buildkit/util/rootless/mountopts" traceexec "github.com/moby/buildkit/util/tracing/exec" @@ -59,7 +60,7 @@ func (pm ProcessMode) String() string { // GenerateSpec generates spec using containerd functionality. // opts are ignored for s.Process, s.Hostname, and s.Mounts . -func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mount, id, resolvConf, hostsFile string, namespace network.Namespace, cgroupParent string, processMode ProcessMode, idmap *idtools.IdentityMapping, apparmorProfile string, selinuxB bool, tracingSocket string, opts ...oci.SpecOpts) (*specs.Spec, func(), error) { +func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mount, id, resolvConf, hostsFile string, namespace network.Namespace, cgroupParent string, processMode ProcessMode, idmap *idtools.IdentityMapping, apparmorProfile string, selinuxB bool, tracingSocket string, cdiManager *cdidevices.Manager, opts ...oci.SpecOpts) (*specs.Spec, func(), error) { c := &containers.Container{ ID: id, } @@ -129,6 +130,14 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou oci.WithHostname(hostname), ) + if cdiManager != nil { + if cdiOpts, err := generateCDIOpts(cdiManager, meta.CDIDevices); err == nil { + opts = append(opts, cdiOpts...) + } else { + return nil, nil, err + } + } + s, err := oci.GenerateSpec(ctx, nil, c, opts...) 
if err != nil { return nil, nil, errors.WithStack(err) diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec_darwin.go b/vendor/github.com/moby/buildkit/executor/oci/spec_darwin.go index c2658ecb9e..ae609a216b 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/spec_darwin.go +++ b/vendor/github.com/moby/buildkit/executor/oci/spec_darwin.go @@ -5,6 +5,7 @@ import ( "github.com/containerd/containerd/v2/pkg/oci" "github.com/containerd/continuity/fs" "github.com/docker/docker/pkg/idtools" + "github.com/moby/buildkit/solver/llbsolver/cdidevices" "github.com/moby/buildkit/solver/pb" "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" @@ -62,3 +63,10 @@ func sub(m mount.Mount, subPath string) (mount.Mount, func() error, error) { m.Source = src return m, func() error { return nil }, nil } + +func generateCDIOpts(_ *cdidevices.Manager, devices []*pb.CDIDevice) ([]oci.SpecOpts, error) { + if len(devices) == 0 { + return nil, nil + } + return nil, errors.New("no support for CDI on Darwin") +} diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec_freebsd.go b/vendor/github.com/moby/buildkit/executor/oci/spec_freebsd.go index 73cbe7ec79..d1456a8eca 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/spec_freebsd.go +++ b/vendor/github.com/moby/buildkit/executor/oci/spec_freebsd.go @@ -5,6 +5,7 @@ import ( "github.com/containerd/containerd/v2/pkg/oci" "github.com/containerd/continuity/fs" "github.com/docker/docker/pkg/idtools" + "github.com/moby/buildkit/solver/llbsolver/cdidevices" "github.com/moby/buildkit/solver/pb" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" @@ -70,3 +71,10 @@ func sub(m mount.Mount, subPath string) (mount.Mount, func() error, error) { m.Source = src return m, func() error { return nil }, nil } + +func generateCDIOpts(_ *cdidevices.Manager, devices []*pb.CDIDevice) ([]oci.SpecOpts, error) { + if len(devices) == 0 { + return nil, nil + } + return nil, errors.New("no support for CDI on FreeBSD") +} diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec_linux.go b/vendor/github.com/moby/buildkit/executor/oci/spec_linux.go index 9ee59fc85a..7b437c28b8 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/spec_linux.go +++ b/vendor/github.com/moby/buildkit/executor/oci/spec_linux.go @@ -17,7 +17,9 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/profiles/seccomp" "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver/llbsolver/cdidevices" "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/entitlements/security" specs "github.com/opencontainers/runtime-spec/specs-go" selinux "github.com/opencontainers/selinux/go-selinux" @@ -148,6 +150,34 @@ func generateRlimitOpts(ulimits []*pb.Ulimit) ([]oci.SpecOpts, error) { }, nil } +// generateCDIOpts creates the OCI runtime spec options for injecting CDI +// devices.
+func generateCDIOpts(manager *cdidevices.Manager, devs []*pb.CDIDevice) ([]oci.SpecOpts, error) { + if len(devs) == 0 { + return nil, nil + } + + withCDIDevices := func(devs []*pb.CDIDevice) oci.SpecOpts { + return func(ctx context.Context, _ oci.Client, c *containers.Container, s *specs.Spec) error { + if err := manager.Refresh(); err != nil { + bklog.G(ctx).Warnf("CDI registry refresh failed: %v", err) + } + if err := manager.InjectDevices(s, devs...); err != nil { + return errors.Wrapf(err, "CDI device injection failed") + } + // One crucial thing to keep in mind is that CDI device injection + // might add OCI Spec environment variables, hooks, and mounts as + // well. Therefore, it is important that none of the corresponding + // OCI Spec fields are reset up in the call stack once we return. + return nil + } + } + + return []oci.SpecOpts{ + withCDIDevices(devs), + }, nil +} + // withDefaultProfile sets the default seccomp profile to the spec. // Note: must follow the setting of process capabilities func withDefaultProfile() oci.SpecOpts { diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec_windows.go b/vendor/github.com/moby/buildkit/executor/oci/spec_windows.go index d3059c3034..f21b28ed8b 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/spec_windows.go +++ b/vendor/github.com/moby/buildkit/executor/oci/spec_windows.go @@ -14,6 +14,7 @@ import ( "github.com/containerd/containerd/v2/pkg/oci" "github.com/containerd/continuity/fs" "github.com/docker/docker/pkg/idtools" + "github.com/moby/buildkit/solver/llbsolver/cdidevices" "github.com/moby/buildkit/solver/pb" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" @@ -110,3 +111,11 @@ func sub(m mount.Mount, subPath string) (mount.Mount, func() error, error) { m.Source = src return m, func() error { return nil }, nil } + +func generateCDIOpts(_ *cdidevices.Manager, devices []*pb.CDIDevice) ([]oci.SpecOpts, error) { + if len(devices) == 0 { + return nil, nil + } + // https://github.com/cncf-tags/container-device-interface/issues/28 + return nil, errors.New("no support for CDI on Windows") +} diff --git a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go index f284c90cd1..1ca978bbb7 100644 --- a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go +++ b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go @@ -30,6 +30,7 @@ import ( resourcestypes "github.com/moby/buildkit/executor/resources/types" gatewayapi "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/solver/llbsolver/cdidevices" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/network" rootlessspecconv "github.com/moby/buildkit/util/rootless/specconv" @@ -57,6 +58,7 @@ type Opt struct { SELinux bool TracingSocket string ResourceMonitor *resources.Monitor + CDIManager *cdidevices.Manager } var defaultCommandCandidates = []string{"buildkit-runc", "runc"} @@ -78,6 +80,7 @@ type runcExecutor struct { selinux bool tracingSocket string resmon *resources.Monitor + cdiManager *cdidevices.Manager } func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Executor, error) { @@ -144,6 +147,7 @@ func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Ex selinux: opt.SELinux, tracingSocket: opt.TracingSocket, resmon: opt.ResourceMonitor, + cdiManager: opt.CDIManager, } return w, nil } @@ -267,7 +271,7 @@ func (w 
*runcExecutor) Run(ctx context.Context, id string, root executor.Mount, } } - spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, w.processMode, w.idmap, w.apparmorProfile, w.selinux, w.tracingSocket, opts...) + spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, w.processMode, w.idmap, w.apparmorProfile, w.selinux, w.tracingSocket, w.cdiManager, opts...) if err != nil { return nil, err } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go index 88e5943ebd..3912f1d3b6 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go @@ -206,17 +206,17 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS return nil, errors.Errorf("Client and MainContext cannot both be provided") } - namedContext := func(ctx context.Context, name string, copt dockerui.ContextOpt) (*llb.State, *dockerspec.DockerOCIImage, error) { + namedContext := func(name string, copt dockerui.ContextOpt) (*dockerui.NamedContext, error) { if opt.Client == nil { - return nil, nil, nil + return nil, nil } if !strings.EqualFold(name, "scratch") && !strings.EqualFold(name, "context") { if copt.Platform == nil { copt.Platform = opt.TargetPlatform } - return opt.Client.NamedContext(ctx, name, copt) + return opt.Client.NamedContext(name, copt) } - return nil, nil, nil + return nil, nil } lint, err := newRuleLinter(dt, &opt) @@ -349,7 +349,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS } if st.Name != "" { - s, img, err := namedContext(ctx, st.Name, dockerui.ContextOpt{ + nc, err := namedContext(st.Name, dockerui.ContextOpt{ Platform: ds.platform, ResolveMode: opt.ImageResolveMode.String(), AsyncLocalOpts: ds.asyncLocalOpts, @@ -357,25 +357,8 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS if err != nil { return nil, err } - if s != nil { - ds.dispatched = true - ds.state = *s - if img != nil { - // timestamps are inherited as-is, regardless to SOURCE_DATE_EPOCH - // https://github.com/moby/buildkit/issues/4614 - ds.image = *img - if img.Architecture != "" && img.OS != "" { - ds.platform = &ocispecs.Platform{ - OS: img.OS, - Architecture: img.Architecture, - Variant: img.Variant, - OSVersion: img.OSVersion, - } - if img.OSFeatures != nil { - ds.platform.OSFeatures = append([]string{}, img.OSFeatures...) 
- } - } - } + if nc != nil { + ds.namedContext = nc allDispatchStates.addState(ds) ds.base = nil // reset base set by addState continue @@ -467,7 +450,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS // resolve image config for every stage if d.base == nil && !d.dispatched && !d.resolved { d.resolved = reachable // avoid re-resolving if called again after onbuild - if d.stage.BaseName == emptyImageName { + if d.stage.BaseName == emptyImageName && d.namedContext == nil { d.state = llb.Scratch() d.image = emptyImage(platformOpt.targetPlatform) d.platform = &platformOpt.targetPlatform @@ -499,25 +482,58 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS d.stage.BaseName = reference.TagNameOnly(ref).String() var isScratch bool - st, img, err := namedContext(ctx, d.stage.BaseName, dockerui.ContextOpt{ - ResolveMode: opt.ImageResolveMode.String(), - Platform: platform, - AsyncLocalOpts: d.asyncLocalOpts, - }) - if err != nil { - return err - } - if st != nil { - if img != nil { - d.image = *img - } else { - d.image = emptyImage(platformOpt.targetPlatform) - } - d.state = st.Platform(*platform) - d.platform = platform - return nil - } if reachable { + // stage was named context + if d.namedContext != nil { + st, img, err := d.namedContext.Load(ctx) + if err != nil { + return err + } + d.dispatched = true + d.state = *st + if img != nil { + // timestamps are inherited as-is, regardless to SOURCE_DATE_EPOCH + // https://github.com/moby/buildkit/issues/4614 + d.image = *img + if img.Architecture != "" && img.OS != "" { + d.platform = &ocispecs.Platform{ + OS: img.OS, + Architecture: img.Architecture, + Variant: img.Variant, + OSVersion: img.OSVersion, + } + if img.OSFeatures != nil { + d.platform.OSFeatures = append([]string{}, img.OSFeatures...) + } + } + } + return nil + } + + // check if base is named context + nc, err := namedContext(d.stage.BaseName, dockerui.ContextOpt{ + ResolveMode: opt.ImageResolveMode.String(), + Platform: platform, + AsyncLocalOpts: d.asyncLocalOpts, + }) + if err != nil { + return err + } + if nc != nil { + st, img, err := nc.Load(ctx) + if err != nil { + return err + } + if img != nil { + d.image = *img + } else { + d.image = emptyImage(platformOpt.targetPlatform) + } + d.state = st.Platform(*platform) + d.platform = platform + return nil + } + prefix := "[" if opt.MultiPlatformRequested && platform != nil { prefix += platforms.FormatAll(*platform) + " " @@ -708,6 +724,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS extraHosts: opt.ExtraHosts, shmSize: opt.ShmSize, ulimit: opt.Ulimits, + devices: opt.Devices, cgroupParent: opt.CgroupParent, llbCaps: opt.LLBCaps, sourceMap: opt.SourceMap, @@ -785,10 +802,15 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS target.state = target.state.SetMarshalDefaults(defaults...) 
if !platformOpt.implicitTarget { + sameOsArch := platformOpt.targetPlatform.OS == target.image.OS && platformOpt.targetPlatform.Architecture == target.image.Architecture target.image.OS = platformOpt.targetPlatform.OS target.image.Architecture = platformOpt.targetPlatform.Architecture - target.image.Variant = platformOpt.targetPlatform.Variant - target.image.OSVersion = platformOpt.targetPlatform.OSVersion + if platformOpt.targetPlatform.Variant != "" || !sameOsArch { + target.image.Variant = platformOpt.targetPlatform.Variant + } + if platformOpt.targetPlatform.OSVersion != "" || !sameOsArch { + target.image.OSVersion = platformOpt.targetPlatform.OSVersion + } if platformOpt.targetPlatform.OSFeatures != nil { target.image.OSFeatures = append([]string{}, platformOpt.targetPlatform.OSFeatures...) } @@ -843,6 +865,7 @@ type dispatchOpt struct { extraHosts []llb.HostIP shmSize int64 ulimit []*pb.Ulimit + devices []*pb.CDIDevice cgroupParent string llbCaps *apicaps.CapSet sourceMap *llb.SourceMap @@ -1015,19 +1038,20 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error { } type dispatchState struct { - opt dispatchOpt - state llb.State - image dockerspec.DockerOCIImage - platform *ocispecs.Platform - stage instructions.Stage - base *dispatchState - baseImg *dockerspec.DockerOCIImage // immutable, unlike image - dispatched bool - resolved bool // resolved is set to true if base image has been resolved - onBuildInit bool - deps map[*dispatchState]instructions.Command - buildArgs []instructions.KeyValuePairOptional - commands []command + opt dispatchOpt + state llb.State + image dockerspec.DockerOCIImage + namedContext *dockerui.NamedContext + platform *ocispecs.Platform + stage instructions.Stage + base *dispatchState + baseImg *dockerspec.DockerOCIImage // immutable, unlike image + dispatched bool + resolved bool // resolved is set to true if base image has been resolved + onBuildInit bool + deps map[*dispatchState]instructions.Command + buildArgs []instructions.KeyValuePairOptional + commands []command // ctxPaths marks the paths this dispatchState uses from the build context. ctxPaths map[string]struct{} // paths marks the paths that are used by this dispatchState. @@ -1286,6 +1310,23 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE } } + if dopt.llbCaps != nil && dopt.llbCaps.Supports(pb.CapExecMetaCDI) == nil { + for _, device := range dopt.devices { + deviceOpts := []llb.CDIDeviceOption{ + llb.CDIDeviceName(device.Name), + } + if device.Optional { + deviceOpts = append(deviceOpts, llb.CDIDeviceOptional) + } + opt = append(opt, llb.AddCDIDevice(deviceOpts...)) + } + runDevices, err := dispatchRunDevices(c) + if err != nil { + return err + } + opt = append(opt, runDevices...) 
+ } + shlex := *dopt.shlex shlex.RawQuotes = true shlex.SkipUnsetEnv = true diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norundevice.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norundevice.go new file mode 100644 index 0000000000..97a2aeef91 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norundevice.go @@ -0,0 +1,12 @@ +//go:build !dfrundevice + +package dockerfile2llb + +import ( + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend/dockerfile/instructions" +) + +func dispatchRunDevices(_ *instructions.RunCommand) ([]llb.RunOption, error) { + return nil, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_rundevice.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_rundevice.go new file mode 100644 index 0000000000..72b44755a4 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_rundevice.go @@ -0,0 +1,22 @@ +//go:build dfrundevice + +package dockerfile2llb + +import ( + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend/dockerfile/instructions" +) + +func dispatchRunDevices(c *instructions.RunCommand) ([]llb.RunOption, error) { + var out []llb.RunOption + for _, device := range instructions.GetDevices(c) { + deviceOpts := []llb.CDIDeviceOption{ + llb.CDIDeviceName(device.Name), + } + if !device.Required { + deviceOpts = append(deviceOpts, llb.CDIDeviceOptional) + } + out = append(out, llb.AddCDIDevice(deviceOpts...)) + } + return out, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_rundevice.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_rundevice.go new file mode 100644 index 0000000000..693f28bdc1 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_rundevice.go @@ -0,0 +1,118 @@ +package instructions + +import ( + "strconv" + "strings" + + "github.com/moby/buildkit/util/suggest" + "github.com/pkg/errors" + "github.com/tonistiigi/go-csvvalue" +) + +var devicesKey = "dockerfile/run/devices" + +func init() { + parseRunPreHooks = append(parseRunPreHooks, runDevicePreHook) + parseRunPostHooks = append(parseRunPostHooks, runDevicePostHook) +} + +func runDevicePreHook(cmd *RunCommand, req parseRequest) error { + st := &deviceState{} + st.flag = req.flags.AddStrings("device") + cmd.setExternalValue(devicesKey, st) + return nil +} + +func runDevicePostHook(cmd *RunCommand, req parseRequest) error { + return setDeviceState(cmd) +} + +func setDeviceState(cmd *RunCommand) error { + st := getDeviceState(cmd) + if st == nil { + return errors.Errorf("no device state") + } + devices := make([]*Device, len(st.flag.StringValues)) + for i, str := range st.flag.StringValues { + d, err := ParseDevice(str) + if err != nil { + return err + } + devices[i] = d + } + st.devices = devices + return nil +} + +func getDeviceState(cmd *RunCommand) *deviceState { + v := cmd.getExternalValue(devicesKey) + if v == nil { + return nil + } + return v.(*deviceState) +} + +func GetDevices(cmd *RunCommand) []*Device { + return getDeviceState(cmd).devices +} + +type deviceState struct { + flag *Flag + devices []*Device +} + +type Device struct { + Name string + Required bool +} + +func ParseDevice(val string) (*Device, error) { + fields, err := csvvalue.Fields(val, nil) + if err != nil { + return nil, errors.Wrap(err, "failed 
to parse csv devices") + } + + d := &Device{} + + for _, field := range fields { + key, value, ok := strings.Cut(field, "=") + key = strings.ToLower(key) + + if !ok { + switch key { + case "required": + d.Required = true + continue + default: + if d.Name == "" { + d.Name = field + continue + } + // any other option requires a value. + return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field) + } + } + + switch key { + case "name": + if d.Name != "" { + return nil, errors.Errorf("device name already set to %s", d.Name) + } + d.Name = value + case "required": + d.Required, err = strconv.ParseBool(value) + if err != nil { + return nil, errors.Errorf("invalid value for %s: %s", key, value) + } + default: + if d.Name == "" { + d.Name = field + continue + } + allKeys := []string{"name", "required"} + return nil, suggest.WrapError(errors.Errorf("unexpected key '%s' in '%s'", key, field), key, allKeys, true) + } + } + + return d, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerui/config.go b/vendor/github.com/moby/buildkit/frontend/dockerui/config.go index a880c886ee..ee8a266754 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerui/config.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerui/config.go @@ -18,7 +18,6 @@ import ( "github.com/moby/buildkit/frontend/gateway/client" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/flightcontrol" - dockerspec "github.com/moby/docker-image-spec/specs-go/v1" "github.com/moby/patternmatcher/ignorefile" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" @@ -67,6 +66,7 @@ type Config struct { ShmSize int64 Target string Ulimits []*pb.Ulimit + Devices []*pb.CDIDevice LinterConfig *linter.Config CacheImports []client.CacheOptionsEntry @@ -452,10 +452,10 @@ func (bc *Client) MainContext(ctx context.Context, opts ...llb.LocalOption) (*ll return &st, nil } -func (bc *Client) NamedContext(ctx context.Context, name string, opt ContextOpt) (*llb.State, *dockerspec.DockerOCIImage, error) { +func (bc *Client) NamedContext(name string, opt ContextOpt) (*NamedContext, error) { named, err := reference.ParseNormalizedNamed(name) if err != nil { - return nil, nil, errors.Wrapf(err, "invalid context name %s", name) + return nil, errors.Wrapf(err, "invalid context name %s", name) } name = strings.TrimSuffix(reference.FamiliarString(named), ":latest") @@ -464,11 +464,11 @@ func (bc *Client) NamedContext(ctx context.Context, name string, opt ContextOpt) pp = *opt.Platform } pname := name + "::" + platforms.FormatAll(platforms.Normalize(pp)) - st, img, err := bc.namedContext(ctx, name, pname, opt) - if err != nil || st != nil { - return st, img, err + nc, err := bc.namedContext(name, pname, opt) + if err != nil || nc != nil { + return nc, err } - return bc.namedContext(ctx, name, name, opt) + return bc.namedContext(name, name, opt) } func (bc *Client) IsNoCache(name string) bool { diff --git a/vendor/github.com/moby/buildkit/frontend/dockerui/namedcontext.go b/vendor/github.com/moby/buildkit/frontend/dockerui/namedcontext.go index 3bf7343e4e..9598d0b2d9 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerui/namedcontext.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerui/namedcontext.go @@ -26,31 +26,51 @@ const ( maxContextRecursion = 10 ) -func (bc *Client) namedContext(ctx context.Context, name string, nameWithPlatform string, opt ContextOpt) (*llb.State, *dockerspec.DockerOCIImage, error) { - return bc.namedContextRecursive(ctx, 
name, nameWithPlatform, opt, 0) +type NamedContext struct { + input string + bc *Client + name string + nameWithPlatform string + opt ContextOpt } -func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWithPlatform string, opt ContextOpt, count int) (*llb.State, *dockerspec.DockerOCIImage, error) { +func (bc *Client) namedContext(name string, nameWithPlatform string, opt ContextOpt) (*NamedContext, error) { opts := bc.bopts.Opts contextKey := contextPrefix + nameWithPlatform v, ok := opts[contextKey] if !ok { - return nil, nil, nil + return nil, nil } + return &NamedContext{ + input: v, + bc: bc, + name: name, + nameWithPlatform: nameWithPlatform, + opt: opt, + }, nil +} + +func (nc *NamedContext) Load(ctx context.Context) (*llb.State, *dockerspec.DockerOCIImage, error) { + return nc.load(ctx, 0) +} + +func (nc *NamedContext) load(ctx context.Context, count int) (*llb.State, *dockerspec.DockerOCIImage, error) { + opt := nc.opt if count > maxContextRecursion { - return nil, nil, errors.New("context recursion limit exceeded; this may indicate a cycle in the provided source policies: " + v) + return nil, nil, errors.New("context recursion limit exceeded; this may indicate a cycle in the provided source policies: " + nc.input) } - vv := strings.SplitN(v, ":", 2) + vv := strings.SplitN(nc.input, ":", 2) if len(vv) != 2 { - return nil, nil, errors.Errorf("invalid context specifier %s for %s", v, nameWithPlatform) + return nil, nil, errors.Errorf("invalid context specifier %s for %s", nc.input, nc.nameWithPlatform) } // allow git@ without protocol for SSH URLs for backwards compatibility if strings.HasPrefix(vv[0], "git@") { vv[0] = "git" } + switch vv[0] { case "docker-image": ref := strings.TrimPrefix(vv[1], "//") @@ -60,7 +80,7 @@ func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWi } imgOpt := []llb.ImageOption{ - llb.WithCustomName("[context " + nameWithPlatform + "] " + ref), + llb.WithCustomName("[context " + nc.nameWithPlatform + "] " + ref), } if opt.Platform != nil { imgOpt = append(imgOpt, llb.Platform(*opt.Platform)) @@ -73,8 +93,8 @@ func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWi named = reference.TagNameOnly(named) - ref, dgst, data, err := bc.client.ResolveImageConfig(ctx, named.String(), sourceresolver.Opt{ - LogName: fmt.Sprintf("[context %s] load metadata for %s", nameWithPlatform, ref), + ref, dgst, data, err := nc.bc.client.ResolveImageConfig(ctx, named.String(), sourceresolver.Opt{ + LogName: fmt.Sprintf("[context %s] load metadata for %s", nc.nameWithPlatform, ref), Platform: opt.Platform, ImageOpt: &sourceresolver.ResolveImageOpt{ ResolveMode: opt.ResolveMode, @@ -88,8 +108,16 @@ func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWi return nil, nil, errors.Errorf("could not parse ref: %s", e.Updated) } - bc.bopts.Opts[contextKey] = before + ":" + after - return bc.namedContextRecursive(ctx, name, nameWithPlatform, opt, count+1) + nc.bc.bopts.Opts[contextPrefix+nc.nameWithPlatform] = before + ":" + after + + ncnew, err := nc.bc.namedContext(nc.name, nc.nameWithPlatform, nc.opt) + if err != nil { + return nil, nil, err + } + if ncnew == nil { + return nil, nil, nil + } + return ncnew.load(ctx, count+1) } return nil, nil, err } @@ -110,15 +138,15 @@ func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWi } return &st, &img, nil case "git": - st, ok := DetectGitContext(v, true) + st, ok := DetectGitContext(nc.input, true) if !ok { - return 
nil, nil, errors.Errorf("invalid git context %s", v) + return nil, nil, errors.Errorf("invalid git context %s", nc.input) } return st, nil, nil case "http", "https": - st, ok := DetectGitContext(v, true) + st, ok := DetectGitContext(nc.input, true) if !ok { - httpst := llb.HTTP(v, llb.WithCustomName("[context "+nameWithPlatform+"] "+v)) + httpst := llb.HTTP(nc.input, llb.WithCustomName("[context "+nc.nameWithPlatform+"] "+nc.input)) st = &httpst } return st, nil, nil @@ -139,21 +167,21 @@ func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWi // for the dummy ref primarily used in log messages, we can use the // original name, since the store key may not be significant - dummyRef, err := reference.ParseNormalizedNamed(name) + dummyRef, err := reference.ParseNormalizedNamed(nc.name) if err != nil { - return nil, nil, errors.Wrapf(err, "could not parse oci-layout reference %q", name) + return nil, nil, errors.Wrapf(err, "could not parse oci-layout reference %q", nc.name) } dummyRef, err = reference.WithDigest(dummyRef, dgstd.Digest()) if err != nil { - return nil, nil, errors.Wrapf(err, "could not wrap %q with digest", name) + return nil, nil, errors.Wrapf(err, "could not wrap %q with digest", nc.name) } - _, dgst, data, err := bc.client.ResolveImageConfig(ctx, dummyRef.String(), sourceresolver.Opt{ - LogName: fmt.Sprintf("[context %s] load metadata for %s", nameWithPlatform, dummyRef.String()), + _, dgst, data, err := nc.bc.client.ResolveImageConfig(ctx, dummyRef.String(), sourceresolver.Opt{ + LogName: fmt.Sprintf("[context %s] load metadata for %s", nc.nameWithPlatform, dummyRef.String()), Platform: opt.Platform, OCILayoutOpt: &sourceresolver.ResolveOCILayoutOpt{ Store: sourceresolver.ResolveImageConfigOptStore{ - SessionID: bc.bopts.SessionID, + SessionID: nc.bc.bopts.SessionID, StoreID: named.Name(), }, }, @@ -168,8 +196,8 @@ func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWi } ociOpt := []llb.OCILayoutOption{ - llb.WithCustomName("[context " + nameWithPlatform + "] OCI load from client"), - llb.OCIStore(bc.bopts.SessionID, named.Name()), + llb.WithCustomName("[context " + nc.nameWithPlatform + "] OCI load from client"), + llb.OCIStore(nc.bc.bopts.SessionID, named.Name()), } if opt.Platform != nil { ociOpt = append(ociOpt, llb.Platform(*opt.Platform)) @@ -187,22 +215,22 @@ func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWi } return &st, &img, nil case "local": - sessionID := bc.bopts.SessionID - if v, ok := bc.localsSessionIDs[vv[1]]; ok { + sessionID := nc.bc.bopts.SessionID + if v, ok := nc.bc.localsSessionIDs[vv[1]]; ok { sessionID = v } st := llb.Local(vv[1], llb.SessionID(sessionID), llb.FollowPaths([]string{DefaultDockerignoreName}), - llb.SharedKeyHint("context:"+nameWithPlatform+"-"+DefaultDockerignoreName), - llb.WithCustomName("[context "+nameWithPlatform+"] load "+DefaultDockerignoreName), + llb.SharedKeyHint("context:"+nc.nameWithPlatform+"-"+DefaultDockerignoreName), + llb.WithCustomName("[context "+nc.nameWithPlatform+"] load "+DefaultDockerignoreName), llb.Differ(llb.DiffNone, false), ) def, err := st.Marshal(ctx) if err != nil { return nil, nil, err } - res, err := bc.client.Solve(ctx, client.SolveRequest{ + res, err := nc.bc.client.Solve(ctx, client.SolveRequest{ Evaluate: true, Definition: def.ToPB(), }) @@ -229,7 +257,7 @@ func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWi localOutput := &asyncLocalOutput{ name: vv[1], - nameWithPlatform: 
nameWithPlatform, + nameWithPlatform: nc.nameWithPlatform, sessionID: sessionID, excludes: excludes, extraOpts: opt.AsyncLocalOpts, @@ -237,15 +265,15 @@ func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWi st = llb.NewState(localOutput) return &st, nil, nil case "input": - inputs, err := bc.client.Inputs(ctx) + inputs, err := nc.bc.client.Inputs(ctx) if err != nil { return nil, nil, err } st, ok := inputs[vv[1]] if !ok { - return nil, nil, errors.Errorf("invalid input %s for %s", vv[1], nameWithPlatform) + return nil, nil, errors.Errorf("invalid input %s for %s", vv[1], nc.nameWithPlatform) } - md, ok := opts[inputMetadataPrefix+vv[1]] + md, ok := nc.bc.bopts.Opts[inputMetadataPrefix+vv[1]] if ok { m := make(map[string][]byte) if err := json.Unmarshal([]byte(md), &m); err != nil { @@ -258,14 +286,14 @@ func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWi return nil, nil, err } if err := json.Unmarshal(dtic, &img); err != nil { - return nil, nil, errors.Wrapf(err, "failed to parse image config for %s", nameWithPlatform) + return nil, nil, errors.Wrapf(err, "failed to parse image config for %s", nc.nameWithPlatform) } } return &st, img, nil } return &st, nil, nil default: - return nil, nil, errors.Errorf("unsupported context source %s for %s", vv[0], nameWithPlatform) + return nil, nil, errors.Errorf("unsupported context source %s for %s", vv[0], nc.nameWithPlatform) } } diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go index 324455f976..19edcd84f1 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go @@ -191,14 +191,22 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten if err != nil { return nil, err } - st, dockerImage, err := dc.NamedContext(ctx, source, dockerui.ContextOpt{ + nc, err := dc.NamedContext(source, dockerui.ContextOpt{ CaptureDigest: &mfstDigest, }) if err != nil { return nil, err } - if dockerImage != nil { - img = *dockerImage + var st *llb.State + if nc != nil { + var dockerImage *dockerspec.DockerOCIImage + st, dockerImage, err = nc.Load(ctx) + if err != nil { + return nil, err + } + if dockerImage != nil { + img = *dockerImage + } } if st == nil { sourceRef, err := reference.ParseNormalizedNamed(source) diff --git a/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage.go b/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage.go index c374bb6cd5..cffc3a86ad 100644 --- a/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage.go +++ b/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage.go @@ -267,19 +267,28 @@ func (s *Store) emptyBranchWithParents(tx *bolt.Tx, id []byte) error { if backlinks := tx.Bucket([]byte(backlinksBucket)).Bucket(id); backlinks != nil { if err := backlinks.ForEach(func(k, v []byte) error { if subLinks := tx.Bucket([]byte(linksBucket)).Bucket(k); subLinks != nil { + // Perform deletion outside of the iteration. 
+ // https://github.com/etcd-io/bbolt/pull/611 + var toDelete []string if err := subLinks.ForEach(func(k, v []byte) error { parts := bytes.Split(k, []byte("@")) if len(parts) != 2 { return errors.Errorf("invalid key %s", k) } if bytes.Equal(id, parts[1]) { - return subLinks.Delete(k) + toDelete = append(toDelete, string(k)) } return nil }); err != nil { return err } + for _, k := range toDelete { + if err := subLinks.Delete([]byte(k)); err != nil { + return err + } + } + if isEmptyBucket(subLinks) { if subResult := tx.Bucket([]byte(resultBucket)).Bucket(k); isEmptyBucket(subResult) { if err := tx.Bucket([]byte(linksBucket)).DeleteBucket(k); err != nil { diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/cdidevices/manager.go b/vendor/github.com/moby/buildkit/solver/llbsolver/cdidevices/manager.go new file mode 100644 index 0000000000..1996a41c8f --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/cdidevices/manager.go @@ -0,0 +1,250 @@ +package cdidevices + +import ( + "context" + "strings" + + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/bklog" + "github.com/moby/locker" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "tags.cncf.io/container-device-interface/pkg/cdi" + "tags.cncf.io/container-device-interface/pkg/parser" +) + +const deviceAnnotationClass = "org.mobyproject.buildkit.device.class" + +var installers = map[string]Setup{} + +type Setup interface { + Validate() error + Run(ctx context.Context) error +} + +// Register registers new setup for a device. +func Register(name string, setup Setup) { + installers[name] = setup +} + +type Device struct { + Name string + AutoAllow bool + OnDemand bool + Annotations map[string]string +} + +type Manager struct { + cache *cdi.Cache + locker *locker.Locker +} + +func NewManager(cache *cdi.Cache) *Manager { + return &Manager{ + cache: cache, + locker: locker.New(), + } +} + +func (m *Manager) ListDevices() []Device { + devs := m.cache.ListDevices() + out := make([]Device, 0, len(devs)) + kinds := make(map[string]struct{}) + for _, dev := range devs { + kind, _, _ := strings.Cut(dev, "=") + dd := m.cache.GetDevice(dev) + out = append(out, Device{ + Name: dev, + AutoAllow: true, // TODO + Annotations: deviceAnnotations(dd), + }) + kinds[kind] = struct{}{} + } + + for k, setup := range installers { + if _, ok := kinds[k]; ok { + continue + } + if err := setup.Validate(); err != nil { + continue + } + out = append(out, Device{ + Name: k, + OnDemand: true, + }) + } + + return out +} + +func (m *Manager) Refresh() error { + return m.cache.Refresh() +} + +func (m *Manager) InjectDevices(spec *specs.Spec, devs ...*pb.CDIDevice) error { + pdevs, err := m.parseDevices(devs...) + if err != nil { + return err + } else if len(pdevs) == 0 { + return nil + } + bklog.G(context.TODO()).Debugf("Injecting devices %v", pdevs) + _, err = m.cache.InjectDevices(spec, pdevs...) + return err +} + +func (m *Manager) parseDevices(devs ...*pb.CDIDevice) ([]string, error) { + var out []string + for _, dev := range devs { + if dev == nil { + continue + } + pdev, err := m.parseDevice(dev) + if err != nil { + return nil, err + } else if len(pdev) == 0 { + continue + } + out = append(out, pdev...) 
+ } + return dedupSlice(out), nil +} + +func (m *Manager) parseDevice(dev *pb.CDIDevice) ([]string, error) { + var out []string + + kind, name, _ := strings.Cut(dev.Name, "=") + + // validate kind + if vendor, class := parser.ParseQualifier(kind); vendor == "" { + return nil, errors.Errorf("invalid device %q", dev.Name) + } else if err := parser.ValidateVendorName(vendor); err != nil { + return nil, errors.Wrapf(err, "invalid device %q", dev.Name) + } else if err := parser.ValidateClassName(class); err != nil { + return nil, errors.Wrapf(err, "invalid device %q", dev.Name) + } + + switch name { + case "": + // first device of kind if no name is specified + for _, d := range m.cache.ListDevices() { + if strings.HasPrefix(d, kind+"=") { + out = append(out, d) + break + } + } + case "*": + // all devices of kind if the name is a wildcard + for _, d := range m.cache.ListDevices() { + if strings.HasPrefix(d, kind+"=") { + out = append(out, d) + } + } + default: + // the specified device + for _, d := range m.cache.ListDevices() { + if d == dev.Name { + out = append(out, d) + break + } + } + if len(out) == 0 { + // check class annotation if name unknown + for _, d := range m.cache.ListDevices() { + if !strings.HasPrefix(d, kind+"=") { + continue + } + if a := deviceAnnotations(m.cache.GetDevice(d)); a != nil { + if class, ok := a[deviceAnnotationClass]; ok && class == name { + out = append(out, d) + } + } + } + } + } + + if len(out) == 0 { + if !dev.Optional { + return nil, errors.Errorf("required device %q is not registered", dev.Name) + } + bklog.G(context.TODO()).Warnf("Optional device %q is not registered", dev.Name) + } + return out, nil +} + +func (m *Manager) hasDevice(name string) bool { + for _, d := range m.cache.ListDevices() { + kind, _, _ := strings.Cut(d, "=") + if kind == name { + return true + } + } + return false +} + +func (m *Manager) OnDemandInstaller(name string) (func(context.Context) error, bool) { + name, _, _ = strings.Cut(name, "=") + + installer, ok := installers[name] + if !ok { + return nil, false + } + + if m.hasDevice(name) { + return nil, false + } + + return func(ctx context.Context) error { + m.locker.Lock(name) + defer m.locker.Unlock(name) + + if m.hasDevice(name) { + return nil + } + + if err := installer.Validate(); err != nil { + return errors.Wrapf(err, "failed to find preconditions for %s device", name) + } + + if err := installer.Run(ctx); err != nil { + return errors.Wrapf(err, "failed to create %s device", name) + } + + if err := m.cache.Refresh(); err != nil { + return errors.Wrapf(err, "failed to refresh CDI cache") + } + + return nil + }, true +} + +func deviceAnnotations(dev *cdi.Device) map[string]string { + if dev == nil { + return nil + } + out := make(map[string]string) + // spec annotations + for k, v := range dev.GetSpec().Annotations { + out[k] = v + } + // device annotations + for k, v := range dev.Device.Annotations { + out[k] = v + } + return out +} + +func dedupSlice(s []string) []string { + if len(s) == 0 { + return s + } + var res []string + seen := make(map[string]struct{}) + for _, val := range s { + if _, ok := seen[val]; !ok { + res = append(res, val) + seen[val] = struct{}{} + } + } + return res +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/history.go b/vendor/github.com/moby/buildkit/solver/llbsolver/history.go index 1e2c6e3f64..cdbc094c75 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/history.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/history.go @@ -7,6 +7,7 @@ import ( 
"encoding/json" "io" "os" + "slices" "sort" "strconv" "strings" @@ -15,6 +16,7 @@ import ( "github.com/containerd/containerd/v2/core/content" "github.com/containerd/containerd/v2/core/leases" + "github.com/containerd/containerd/v2/pkg/filters" cerrdefs "github.com/containerd/errdefs" controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client" @@ -23,12 +25,14 @@ import ( containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/db" + "github.com/moby/buildkit/util/gitutil" "github.com/moby/buildkit/util/grpcerrors" "github.com/moby/buildkit/util/iohelper" "github.com/moby/buildkit/util/leaseutil" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" + "github.com/tonistiigi/go-csvvalue" bolt "go.etcd.io/bbolt" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" @@ -41,6 +45,13 @@ const ( versionBucket = "_version" ) +const ( + statusRunning = "running" + statusCompleted = "completed" + statusError = "error" + statusCanceled = "canceled" +) + type HistoryQueueOpt struct { DB db.Transactor LeaseManager *leaseutil.Manager @@ -997,6 +1008,12 @@ func (h *HistoryQueue) Listen(ctx context.Context, req *controlapi.BuildHistoryR } } h.mu.Unlock() + + events, err := filterHistoryEvents(events, req.Filter, req.Limit) + if err != nil { + return err + } + for _, e := range events { if e == nil || e.Record == nil { continue @@ -1028,6 +1045,207 @@ func (h *HistoryQueue) Listen(ctx context.Context, req *controlapi.BuildHistoryR } } +func filterHistoryEvents(in []*controlapi.BuildHistoryEvent, filters []string, limit int32) ([]*controlapi.BuildHistoryEvent, error) { + f, err := parseFilters(filters) + if err != nil { + return nil, err + } + + out := make([]*controlapi.BuildHistoryEvent, 0, len(in)) + + if len(f) == 0 { + out = in + } else { + loop0: + for _, ev := range in { + for _, fn := range f { + if fn(ev) { + out = append(out, ev) + continue loop0 + } + } + } + } + + if limit != 0 { + if limit < 0 { + return nil, errors.Errorf("invalid limit %d", limit) + } + slices.SortFunc(out, func(a, b *controlapi.BuildHistoryEvent) int { + if a.Record == nil || b.Record == nil { + return 0 + } + if a.Record == nil { + return 1 + } + return b.Record.CreatedAt.AsTime().Compare(a.Record.CreatedAt.AsTime()) + }) + if int32(len(out)) > limit { + out = out[:limit] + } + } + return out, nil +} + +func parseFilters(in []string) ([]func(*controlapi.BuildHistoryEvent) bool, error) { + if len(in) == 0 { + return nil, nil + } + + var out []func(*controlapi.BuildHistoryEvent) bool + for _, in := range in { + fns, err := parseFilter(in) + if err != nil { + return nil, err + } + out = append(out, func(ev *controlapi.BuildHistoryEvent) bool { + for _, fn := range fns { + if !fn(ev) { + return false + } + } + return true + }) + } + return out, nil +} + +func timeBasedFilter(f string) (func(*controlapi.BuildHistoryEvent) bool, error) { + key, sep, value, _ := cutAny(f, []string{">=", "<=", ">", "<"}) + var cmp int64 + switch key { + case "startedAt", "completedAt": + v, err := time.ParseDuration(value) + if err == nil { + tm := time.Now().Add(-v) + cmp = tm.Unix() + } else { + tm, err := time.Parse(time.RFC3339, value) + if err != nil { + return nil, errors.Errorf("invalid time %s", value) + } + cmp = tm.Unix() + } + case "duration": + v, err := time.ParseDuration(value) + if err != nil { + return nil, 
errors.Errorf("invalid duration %s", value) + } + cmp = int64(v) + default: + return nil, nil + } + + return func(ev *controlapi.BuildHistoryEvent) bool { + if ev.Record == nil { + return false + } + var val int64 + switch key { + case "startedAt": + val = ev.Record.CreatedAt.AsTime().Unix() + case "completedAt": + if ev.Record.CompletedAt != nil { + val = ev.Record.CompletedAt.AsTime().Unix() + } + case "duration": + if ev.Record.CompletedAt != nil { + val = int64(ev.Record.CompletedAt.AsTime().Sub(ev.Record.CreatedAt.AsTime())) + } + } + switch sep { + case ">=": + return val >= cmp + case "<=": + return val <= cmp + case ">": + return val > cmp + default: + return val < cmp + } + }, nil +} + +func parseFilter(in string) ([]func(*controlapi.BuildHistoryEvent) bool, error) { + var out []func(*controlapi.BuildHistoryEvent) bool + + fields, err := csvvalue.Fields(in, nil) + if err != nil { + return nil, err + } + var staticFilters []string + + for _, f := range fields { + fn, err := timeBasedFilter(f) + if err != nil { + return nil, err + } + if fn == nil { + staticFilters = append(staticFilters, f) + continue + } + out = append(out, fn) + } + + filter, err := filters.ParseAll(strings.Join(staticFilters, ",")) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse history filters %v", in) + } + + out = append(out, func(ev *controlapi.BuildHistoryEvent) bool { + if ev.Record == nil { + return false + } + return filter.Match(adaptHistoryRecord(ev.Record)) + }) + return out, nil +} + +func adaptHistoryRecord(rec *controlapi.BuildHistoryRecord) filters.Adaptor { + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "ref": + return rec.Ref, rec.Ref != "" + case "status": + if rec.CompletedAt != nil { + if rec.Error != nil { + if strings.Contains(rec.Error.Message, "context canceled") { + return statusCanceled, true + } + return statusError, true + } + return statusCompleted, true + } + return statusRunning, true + case "repository": + v, ok := rec.FrontendAttrs["vcs:source"] + if ok { + return v, true + } + if context, ok := rec.FrontendAttrs["context"]; ok { + if ref, err := gitutil.ParseGitRef(context); err == nil { + return ref.Remote, true + } + } + return "", false + } + return "", false + }) +} + +func cutAny(in string, opt []string) (before string, sep string, after string, found bool) { + for _, s := range opt { + if i := strings.Index(in, s); i >= 0 { + return in[:i], s, in[i+len(s):], true + } + } + return "", "", "", false +} + type pubsub[T any] struct { mu sync.Mutex m map[*channel[T]]struct{} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go index 11184ae74d..483e76f872 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go @@ -199,6 +199,20 @@ func (e *ExecOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol cm.Deps[i].PreprocessFunc = unlazyResultFunc } + for _, d := range e.op.CdiDevices { + setup, ok := e.w.CDIManager().OnDemandInstaller(d.Name) + if ok { + prev := cm.Deps[0].PreprocessFunc + cm.Deps[0].PreprocessFunc = func(ctx context.Context, r solver.Result, g session.Group) error { + if err := prev(ctx, r, g); err != nil { + return err + } + // we could pass glibc/musl type in here based on rootfs to get correct dynamic libs + return setup(ctx) + } + } + } + return cm, true, nil 
} @@ -432,6 +446,7 @@ func (e *ExecOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu ReadonlyRootFS: p.ReadonlyRootFS, ExtraHosts: extraHosts, Ulimit: e.op.Meta.Ulimit, + CDIDevices: e.op.CdiDevices, CgroupParent: e.op.Meta.CgroupParent, NetMode: e.op.Network, SecurityMode: e.op.Security, diff --git a/vendor/github.com/moby/buildkit/solver/pb/caps.go b/vendor/github.com/moby/buildkit/solver/pb/caps.go index 8ba462978b..eda77165e2 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/caps.go +++ b/vendor/github.com/moby/buildkit/solver/pb/caps.go @@ -47,6 +47,7 @@ const ( CapExecMetaSecurityDeviceWhitelistV1 apicaps.CapID = "exec.meta.security.devices.v1" CapExecMetaSetsDefaultPath apicaps.CapID = "exec.meta.setsdefaultpath" CapExecMetaUlimit apicaps.CapID = "exec.meta.ulimit" + CapExecMetaCDI apicaps.CapID = "exec.meta.cdi" CapExecMetaRemoveMountStubsRecursive apicaps.CapID = "exec.meta.removemountstubs.recursive" CapExecMountBind apicaps.CapID = "exec.mount.bind" CapExecMountBindReadWriteNoOutput apicaps.CapID = "exec.mount.bind.readwrite-nooutput" @@ -96,6 +97,9 @@ const ( // GC/Prune controls allow MinFreeSpace and MaxUsedSpace to be set CapGCFreeSpaceFilter apicaps.CapID = "gc.freespacefilter" + + // ListenBuildHistory requests support server-side filters + CapHistoryFilters apicaps.CapID = "history.filter" ) func init() { @@ -291,6 +295,12 @@ func init() { Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ + ID: CapExecMetaCDI, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapExecMountBind, Enabled: true, @@ -453,7 +463,7 @@ func init() { Caps.Init(apicaps.Cap{ ID: CapRemoteCacheAzBlob, - Enabled: true, + Enabled: false, Status: apicaps.CapStatusExperimental, }) @@ -505,4 +515,10 @@ func init() { Enabled: true, Status: apicaps.CapStatusExperimental, }) + + Caps.Init(apicaps.Cap{ + ID: CapHistoryFilters, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) } diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go index 6975da4cee..4acac63c24 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go +++ b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go @@ -579,11 +579,12 @@ type ExecOp struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Meta *Meta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` - Mounts []*Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` - Network NetMode `protobuf:"varint,3,opt,name=network,proto3,enum=pb.NetMode" json:"network,omitempty"` - Security SecurityMode `protobuf:"varint,4,opt,name=security,proto3,enum=pb.SecurityMode" json:"security,omitempty"` - Secretenv []*SecretEnv `protobuf:"bytes,5,rep,name=secretenv,proto3" json:"secretenv,omitempty"` + Meta *Meta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` + Mounts []*Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` + Network NetMode `protobuf:"varint,3,opt,name=network,proto3,enum=pb.NetMode" json:"network,omitempty"` + Security SecurityMode `protobuf:"varint,4,opt,name=security,proto3,enum=pb.SecurityMode" json:"security,omitempty"` + Secretenv []*SecretEnv `protobuf:"bytes,5,rep,name=secretenv,proto3" json:"secretenv,omitempty"` + CdiDevices []*CDIDevice `protobuf:"bytes,6,rep,name=cdiDevices,proto3" json:"cdiDevices,omitempty"` } func (x *ExecOp) Reset() { @@ -651,6 +652,13 @@ func (x *ExecOp) GetSecretenv() []*SecretEnv { return nil } 
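// Aside (illustrative sketch, not part of the generated code): an LLB client
// filling the new ExecOp.CdiDevices field. The device names are examples; per
// the cdidevices manager above, "vendor.com/gpu" resolves to the first device
// of that kind, "vendor.com/gpu=*" to all of them, and a fully qualified name
// or class-annotated alias to a specific one.
package example

import "github.com/moby/buildkit/solver/pb"

func execOpWithDevice() *pb.ExecOp {
	return &pb.ExecOp{
		Meta: &pb.Meta{Args: []string{"/bin/sh", "-c", "ls /dev"}},
		CdiDevices: []*pb.CDIDevice{
			{
				Name:     "vendor.com/gpu=gpudevice1", // fully qualified CDI name
				Optional: true,                        // only warn if it is not registered
			},
		},
	}
}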
+func (x *ExecOp) GetCdiDevices() []*CDIDevice { + if x != nil { + return x.CdiDevices + } + return nil +} + // Meta is a set of arguments for ExecOp. // Meta is unrelated to LLB metadata. // FIXME: rename (ExecContext? ExecArgs?) @@ -955,6 +963,63 @@ func (x *SecretEnv) GetOptional() bool { return false } +// CDIDevice specifies a CDI device information. +type CDIDevice struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Fully qualified CDI device name (e.g., vendor.com/gpu=gpudevice1) + // https://github.com/cncf-tags/container-device-interface/blob/main/SPEC.md + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Optional defines if CDI device is required. + Optional bool `protobuf:"varint,2,opt,name=optional,proto3" json:"optional,omitempty"` +} + +func (x *CDIDevice) Reset() { + *x = CDIDevice{} + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CDIDevice) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CDIDevice) ProtoMessage() {} + +func (x *CDIDevice) ProtoReflect() protoreflect.Message { + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CDIDevice.ProtoReflect.Descriptor instead. +func (*CDIDevice) Descriptor() ([]byte, []int) { + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{8} +} + +func (x *CDIDevice) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CDIDevice) GetOptional() bool { + if x != nil { + return x.Optional + } + return false +} + // Mount specifies how to mount an input Op as a filesystem. type Mount struct { state protoimpl.MessageState @@ -977,7 +1042,7 @@ type Mount struct { func (x *Mount) Reset() { *x = Mount{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[8] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -989,7 +1054,7 @@ func (x *Mount) String() string { func (*Mount) ProtoMessage() {} func (x *Mount) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[8] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[9] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1002,7 +1067,7 @@ func (x *Mount) ProtoReflect() protoreflect.Message { // Deprecated: Use Mount.ProtoReflect.Descriptor instead. 
func (*Mount) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{8} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{9} } func (x *Mount) GetInput() int64 { @@ -1101,7 +1166,7 @@ type TmpfsOpt struct { func (x *TmpfsOpt) Reset() { *x = TmpfsOpt{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[9] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1113,7 +1178,7 @@ func (x *TmpfsOpt) String() string { func (*TmpfsOpt) ProtoMessage() {} func (x *TmpfsOpt) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[9] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[10] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1126,7 +1191,7 @@ func (x *TmpfsOpt) ProtoReflect() protoreflect.Message { // Deprecated: Use TmpfsOpt.ProtoReflect.Descriptor instead. func (*TmpfsOpt) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{9} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{10} } func (x *TmpfsOpt) GetSize() int64 { @@ -1150,7 +1215,7 @@ type CacheOpt struct { func (x *CacheOpt) Reset() { *x = CacheOpt{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[10] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1162,7 +1227,7 @@ func (x *CacheOpt) String() string { func (*CacheOpt) ProtoMessage() {} func (x *CacheOpt) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[10] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1175,7 +1240,7 @@ func (x *CacheOpt) ProtoReflect() protoreflect.Message { // Deprecated: Use CacheOpt.ProtoReflect.Descriptor instead. func (*CacheOpt) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{10} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{11} } func (x *CacheOpt) GetID() string { @@ -1213,7 +1278,7 @@ type SecretOpt struct { func (x *SecretOpt) Reset() { *x = SecretOpt{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[11] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1225,7 +1290,7 @@ func (x *SecretOpt) String() string { func (*SecretOpt) ProtoMessage() {} func (x *SecretOpt) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[11] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[12] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1238,7 +1303,7 @@ func (x *SecretOpt) ProtoReflect() protoreflect.Message { // Deprecated: Use SecretOpt.ProtoReflect.Descriptor instead. 
func (*SecretOpt) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{11} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{12} } func (x *SecretOpt) GetID() string { @@ -1297,7 +1362,7 @@ type SSHOpt struct { func (x *SSHOpt) Reset() { *x = SSHOpt{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[12] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1309,7 +1374,7 @@ func (x *SSHOpt) String() string { func (*SSHOpt) ProtoMessage() {} func (x *SSHOpt) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[12] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[13] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1322,7 +1387,7 @@ func (x *SSHOpt) ProtoReflect() protoreflect.Message { // Deprecated: Use SSHOpt.ProtoReflect.Descriptor instead. func (*SSHOpt) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{12} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{13} } func (x *SSHOpt) GetID() string { @@ -1375,7 +1440,7 @@ type SourceOp struct { func (x *SourceOp) Reset() { *x = SourceOp{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[13] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1387,7 +1452,7 @@ func (x *SourceOp) String() string { func (*SourceOp) ProtoMessage() {} func (x *SourceOp) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[13] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[14] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1400,7 +1465,7 @@ func (x *SourceOp) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceOp.ProtoReflect.Descriptor instead. func (*SourceOp) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{13} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{14} } func (x *SourceOp) GetIdentifier() string { @@ -1432,7 +1497,7 @@ type BuildOp struct { func (x *BuildOp) Reset() { *x = BuildOp{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[14] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1444,7 +1509,7 @@ func (x *BuildOp) String() string { func (*BuildOp) ProtoMessage() {} func (x *BuildOp) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[14] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[15] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1457,7 +1522,7 @@ func (x *BuildOp) ProtoReflect() protoreflect.Message { // Deprecated: Use BuildOp.ProtoReflect.Descriptor instead. 
func (*BuildOp) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{14} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{15} } func (x *BuildOp) GetBuilder() int64 { @@ -1499,7 +1564,7 @@ type BuildInput struct { func (x *BuildInput) Reset() { *x = BuildInput{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[15] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1511,7 +1576,7 @@ func (x *BuildInput) String() string { func (*BuildInput) ProtoMessage() {} func (x *BuildInput) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[15] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[16] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1524,7 +1589,7 @@ func (x *BuildInput) ProtoReflect() protoreflect.Message { // Deprecated: Use BuildInput.ProtoReflect.Descriptor instead. func (*BuildInput) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{15} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{16} } func (x *BuildInput) GetInput() int64 { @@ -1553,7 +1618,7 @@ type OpMetadata struct { func (x *OpMetadata) Reset() { *x = OpMetadata{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[16] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1565,7 +1630,7 @@ func (x *OpMetadata) String() string { func (*OpMetadata) ProtoMessage() {} func (x *OpMetadata) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[16] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[17] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1578,7 +1643,7 @@ func (x *OpMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use OpMetadata.ProtoReflect.Descriptor instead. func (*OpMetadata) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{16} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{17} } func (x *OpMetadata) GetIgnoreCache() bool { @@ -1628,7 +1693,7 @@ type Source struct { func (x *Source) Reset() { *x = Source{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[17] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1640,7 +1705,7 @@ func (x *Source) String() string { func (*Source) ProtoMessage() {} func (x *Source) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[17] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[18] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1653,7 +1718,7 @@ func (x *Source) ProtoReflect() protoreflect.Message { // Deprecated: Use Source.ProtoReflect.Descriptor instead. 
func (*Source) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{17} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{18} } func (x *Source) GetLocations() map[string]*Locations { @@ -1681,7 +1746,7 @@ type Locations struct { func (x *Locations) Reset() { *x = Locations{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[18] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1693,7 +1758,7 @@ func (x *Locations) String() string { func (*Locations) ProtoMessage() {} func (x *Locations) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[18] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[19] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1706,7 +1771,7 @@ func (x *Locations) ProtoReflect() protoreflect.Message { // Deprecated: Use Locations.ProtoReflect.Descriptor instead. func (*Locations) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{18} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{19} } func (x *Locations) GetLocations() []*Location { @@ -1730,7 +1795,7 @@ type SourceInfo struct { func (x *SourceInfo) Reset() { *x = SourceInfo{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[19] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1742,7 +1807,7 @@ func (x *SourceInfo) String() string { func (*SourceInfo) ProtoMessage() {} func (x *SourceInfo) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[19] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[20] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1755,7 +1820,7 @@ func (x *SourceInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceInfo.ProtoReflect.Descriptor instead. func (*SourceInfo) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{19} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{20} } func (x *SourceInfo) GetFilename() string { @@ -1798,7 +1863,7 @@ type Location struct { func (x *Location) Reset() { *x = Location{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[20] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1810,7 +1875,7 @@ func (x *Location) String() string { func (*Location) ProtoMessage() {} func (x *Location) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[20] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[21] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1823,7 +1888,7 @@ func (x *Location) ProtoReflect() protoreflect.Message { // Deprecated: Use Location.ProtoReflect.Descriptor instead. 
func (*Location) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{20} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{21} } func (x *Location) GetSourceIndex() int32 { @@ -1852,7 +1917,7 @@ type Range struct { func (x *Range) Reset() { *x = Range{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[21] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1864,7 +1929,7 @@ func (x *Range) String() string { func (*Range) ProtoMessage() {} func (x *Range) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[21] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[22] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1877,7 +1942,7 @@ func (x *Range) ProtoReflect() protoreflect.Message { // Deprecated: Use Range.ProtoReflect.Descriptor instead. func (*Range) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{21} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{22} } func (x *Range) GetStart() *Position { @@ -1906,7 +1971,7 @@ type Position struct { func (x *Position) Reset() { *x = Position{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[22] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1918,7 +1983,7 @@ func (x *Position) String() string { func (*Position) ProtoMessage() {} func (x *Position) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[22] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[23] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1931,7 +1996,7 @@ func (x *Position) ProtoReflect() protoreflect.Message { // Deprecated: Use Position.ProtoReflect.Descriptor instead. func (*Position) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{22} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{23} } func (x *Position) GetLine() int32 { @@ -1958,7 +2023,7 @@ type ExportCache struct { func (x *ExportCache) Reset() { *x = ExportCache{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[23] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1970,7 +2035,7 @@ func (x *ExportCache) String() string { func (*ExportCache) ProtoMessage() {} func (x *ExportCache) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[23] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[24] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1983,7 +2048,7 @@ func (x *ExportCache) ProtoReflect() protoreflect.Message { // Deprecated: Use ExportCache.ProtoReflect.Descriptor instead. 
func (*ExportCache) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{23} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{24} } func (x *ExportCache) GetValue() bool { @@ -2005,7 +2070,7 @@ type ProgressGroup struct { func (x *ProgressGroup) Reset() { *x = ProgressGroup{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[24] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2017,7 +2082,7 @@ func (x *ProgressGroup) String() string { func (*ProgressGroup) ProtoMessage() {} func (x *ProgressGroup) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[24] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[25] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2030,7 +2095,7 @@ func (x *ProgressGroup) ProtoReflect() protoreflect.Message { // Deprecated: Use ProgressGroup.ProtoReflect.Descriptor instead. func (*ProgressGroup) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{24} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{25} } func (x *ProgressGroup) GetId() string { @@ -2068,7 +2133,7 @@ type ProxyEnv struct { func (x *ProxyEnv) Reset() { *x = ProxyEnv{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[25] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2080,7 +2145,7 @@ func (x *ProxyEnv) String() string { func (*ProxyEnv) ProtoMessage() {} func (x *ProxyEnv) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[25] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[26] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2093,7 +2158,7 @@ func (x *ProxyEnv) ProtoReflect() protoreflect.Message { // Deprecated: Use ProxyEnv.ProtoReflect.Descriptor instead. func (*ProxyEnv) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{25} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{26} } func (x *ProxyEnv) GetHttpProxy() string { @@ -2142,7 +2207,7 @@ type WorkerConstraints struct { func (x *WorkerConstraints) Reset() { *x = WorkerConstraints{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[26] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2154,7 +2219,7 @@ func (x *WorkerConstraints) String() string { func (*WorkerConstraints) ProtoMessage() {} func (x *WorkerConstraints) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[26] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[27] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2167,7 +2232,7 @@ func (x *WorkerConstraints) ProtoReflect() protoreflect.Message { // Deprecated: Use WorkerConstraints.ProtoReflect.Descriptor instead. 
func (*WorkerConstraints) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{26} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{27} } func (x *WorkerConstraints) GetFilter() []string { @@ -2194,7 +2259,7 @@ type Definition struct { func (x *Definition) Reset() { *x = Definition{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[27] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2206,7 +2271,7 @@ func (x *Definition) String() string { func (*Definition) ProtoMessage() {} func (x *Definition) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[27] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[28] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2219,7 +2284,7 @@ func (x *Definition) ProtoReflect() protoreflect.Message { // Deprecated: Use Definition.ProtoReflect.Descriptor instead. func (*Definition) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{27} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{28} } func (x *Definition) GetDef() [][]byte { @@ -2253,7 +2318,7 @@ type FileOp struct { func (x *FileOp) Reset() { *x = FileOp{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[28] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2265,7 +2330,7 @@ func (x *FileOp) String() string { func (*FileOp) ProtoMessage() {} func (x *FileOp) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[28] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[29] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2278,7 +2343,7 @@ func (x *FileOp) ProtoReflect() protoreflect.Message { // Deprecated: Use FileOp.ProtoReflect.Descriptor instead. func (*FileOp) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{28} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{29} } func (x *FileOp) GetActions() []*FileAction { @@ -2309,7 +2374,7 @@ type FileAction struct { func (x *FileAction) Reset() { *x = FileAction{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[29] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2321,7 +2386,7 @@ func (x *FileAction) String() string { func (*FileAction) ProtoMessage() {} func (x *FileAction) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[29] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[30] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2334,7 +2399,7 @@ func (x *FileAction) ProtoReflect() protoreflect.Message { // Deprecated: Use FileAction.ProtoReflect.Descriptor instead. 
func (*FileAction) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{29} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{30} } func (x *FileAction) GetInput() int64 { @@ -2478,7 +2543,7 @@ type FileActionCopy struct { func (x *FileActionCopy) Reset() { *x = FileActionCopy{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[30] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2490,7 +2555,7 @@ func (x *FileActionCopy) String() string { func (*FileActionCopy) ProtoMessage() {} func (x *FileActionCopy) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[30] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[31] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2503,7 +2568,7 @@ func (x *FileActionCopy) ProtoReflect() protoreflect.Message { // Deprecated: Use FileActionCopy.ProtoReflect.Descriptor instead. func (*FileActionCopy) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{30} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{31} } func (x *FileActionCopy) GetSrc() string { @@ -2630,7 +2695,7 @@ type FileActionMkFile struct { func (x *FileActionMkFile) Reset() { *x = FileActionMkFile{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[31] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2642,7 +2707,7 @@ func (x *FileActionMkFile) String() string { func (*FileActionMkFile) ProtoMessage() {} func (x *FileActionMkFile) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[31] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[32] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2655,7 +2720,7 @@ func (x *FileActionMkFile) ProtoReflect() protoreflect.Message { // Deprecated: Use FileActionMkFile.ProtoReflect.Descriptor instead. func (*FileActionMkFile) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{31} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{32} } func (x *FileActionMkFile) GetPath() string { @@ -2710,7 +2775,7 @@ type FileActionSymlink struct { func (x *FileActionSymlink) Reset() { *x = FileActionSymlink{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[32] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2722,7 +2787,7 @@ func (x *FileActionSymlink) String() string { func (*FileActionSymlink) ProtoMessage() {} func (x *FileActionSymlink) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[32] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[33] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2735,7 +2800,7 @@ func (x *FileActionSymlink) ProtoReflect() protoreflect.Message { // Deprecated: Use FileActionSymlink.ProtoReflect.Descriptor instead. 
func (*FileActionSymlink) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{32} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{33} } func (x *FileActionSymlink) GetOldpath() string { @@ -2785,7 +2850,7 @@ type FileActionMkDir struct { func (x *FileActionMkDir) Reset() { *x = FileActionMkDir{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[33] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2797,7 +2862,7 @@ func (x *FileActionMkDir) String() string { func (*FileActionMkDir) ProtoMessage() {} func (x *FileActionMkDir) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[33] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[34] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2810,7 +2875,7 @@ func (x *FileActionMkDir) ProtoReflect() protoreflect.Message { // Deprecated: Use FileActionMkDir.ProtoReflect.Descriptor instead. func (*FileActionMkDir) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{33} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{34} } func (x *FileActionMkDir) GetPath() string { @@ -2863,7 +2928,7 @@ type FileActionRm struct { func (x *FileActionRm) Reset() { *x = FileActionRm{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[34] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2875,7 +2940,7 @@ func (x *FileActionRm) String() string { func (*FileActionRm) ProtoMessage() {} func (x *FileActionRm) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[34] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[35] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2888,7 +2953,7 @@ func (x *FileActionRm) ProtoReflect() protoreflect.Message { // Deprecated: Use FileActionRm.ProtoReflect.Descriptor instead. func (*FileActionRm) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{34} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{35} } func (x *FileActionRm) GetPath() string { @@ -2923,7 +2988,7 @@ type ChownOpt struct { func (x *ChownOpt) Reset() { *x = ChownOpt{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[35] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2935,7 +3000,7 @@ func (x *ChownOpt) String() string { func (*ChownOpt) ProtoMessage() {} func (x *ChownOpt) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[35] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[36] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2948,7 +3013,7 @@ func (x *ChownOpt) ProtoReflect() protoreflect.Message { // Deprecated: Use ChownOpt.ProtoReflect.Descriptor instead. 
func (*ChownOpt) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{35} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{36} } func (x *ChownOpt) GetUser() *UserOpt { @@ -2981,7 +3046,7 @@ type UserOpt struct { func (x *UserOpt) Reset() { *x = UserOpt{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[36] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2993,7 +3058,7 @@ func (x *UserOpt) String() string { func (*UserOpt) ProtoMessage() {} func (x *UserOpt) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[36] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[37] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3006,7 +3071,7 @@ func (x *UserOpt) ProtoReflect() protoreflect.Message { // Deprecated: Use UserOpt.ProtoReflect.Descriptor instead. func (*UserOpt) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{36} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{37} } func (m *UserOpt) GetUser() isUserOpt_User { @@ -3057,7 +3122,7 @@ type NamedUserOpt struct { func (x *NamedUserOpt) Reset() { *x = NamedUserOpt{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[37] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3069,7 +3134,7 @@ func (x *NamedUserOpt) String() string { func (*NamedUserOpt) ProtoMessage() {} func (x *NamedUserOpt) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[37] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[38] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3082,7 +3147,7 @@ func (x *NamedUserOpt) ProtoReflect() protoreflect.Message { // Deprecated: Use NamedUserOpt.ProtoReflect.Descriptor instead. func (*NamedUserOpt) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{37} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{38} } func (x *NamedUserOpt) GetName() string { @@ -3109,7 +3174,7 @@ type MergeInput struct { func (x *MergeInput) Reset() { *x = MergeInput{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[38] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3121,7 +3186,7 @@ func (x *MergeInput) String() string { func (*MergeInput) ProtoMessage() {} func (x *MergeInput) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[38] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[39] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3134,7 +3199,7 @@ func (x *MergeInput) ProtoReflect() protoreflect.Message { // Deprecated: Use MergeInput.ProtoReflect.Descriptor instead. 
func (*MergeInput) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{38} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{39} } func (x *MergeInput) GetInput() int64 { @@ -3154,7 +3219,7 @@ type MergeOp struct { func (x *MergeOp) Reset() { *x = MergeOp{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[39] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3166,7 +3231,7 @@ func (x *MergeOp) String() string { func (*MergeOp) ProtoMessage() {} func (x *MergeOp) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[39] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[40] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3179,7 +3244,7 @@ func (x *MergeOp) ProtoReflect() protoreflect.Message { // Deprecated: Use MergeOp.ProtoReflect.Descriptor instead. func (*MergeOp) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{39} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{40} } func (x *MergeOp) GetInputs() []*MergeInput { @@ -3199,7 +3264,7 @@ type LowerDiffInput struct { func (x *LowerDiffInput) Reset() { *x = LowerDiffInput{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[40] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3211,7 +3276,7 @@ func (x *LowerDiffInput) String() string { func (*LowerDiffInput) ProtoMessage() {} func (x *LowerDiffInput) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[40] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[41] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3224,7 +3289,7 @@ func (x *LowerDiffInput) ProtoReflect() protoreflect.Message { // Deprecated: Use LowerDiffInput.ProtoReflect.Descriptor instead. func (*LowerDiffInput) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{40} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{41} } func (x *LowerDiffInput) GetInput() int64 { @@ -3244,7 +3309,7 @@ type UpperDiffInput struct { func (x *UpperDiffInput) Reset() { *x = UpperDiffInput{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[41] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3256,7 +3321,7 @@ func (x *UpperDiffInput) String() string { func (*UpperDiffInput) ProtoMessage() {} func (x *UpperDiffInput) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[41] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[42] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3269,7 +3334,7 @@ func (x *UpperDiffInput) ProtoReflect() protoreflect.Message { // Deprecated: Use UpperDiffInput.ProtoReflect.Descriptor instead. 
func (*UpperDiffInput) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{41} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{42} } func (x *UpperDiffInput) GetInput() int64 { @@ -3290,7 +3355,7 @@ type DiffOp struct { func (x *DiffOp) Reset() { *x = DiffOp{} - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[42] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3302,7 +3367,7 @@ func (x *DiffOp) String() string { func (*DiffOp) ProtoMessage() {} func (x *DiffOp) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[42] + mi := &file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[43] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3315,7 +3380,7 @@ func (x *DiffOp) ProtoReflect() protoreflect.Message { // Deprecated: Use DiffOp.ProtoReflect.Descriptor instead. func (*DiffOp) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{42} + return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{43} } func (x *DiffOp) GetLower() *LowerDiffInput { @@ -3373,7 +3438,7 @@ var file_github_com_moby_buildkit_solver_pb_ops_proto_rawDesc = []byte{ 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x35, 0x0a, 0x05, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xcb, 0x01, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xfa, 0x01, 0x0a, 0x06, 0x45, 0x78, 0x65, 0x63, 0x4f, 0x70, 0x12, 0x1c, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x06, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x73, @@ -3386,361 +3451,368 @@ var file_github_com_moby_buildkit_solver_pb_ops_proto_rawDesc = []byte{ 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x2b, 0x0a, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x76, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x45, 0x6e, 0x76, - 0x52, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x76, 0x22, 0xf3, 0x02, 0x0a, 0x04, - 0x4d, 0x65, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x76, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x77, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x63, 0x77, 0x64, 0x12, 0x12, 0x0a, 0x04, - 0x75, 0x73, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, - 0x12, 0x29, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x65, 0x6e, 0x76, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, - 0x76, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x76, 0x12, 0x2a, 0x0a, 0x0a, 0x65, - 0x78, 0x74, 0x72, 0x61, 0x48, 0x6f, 0x73, 0x74, 
0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x49, 0x50, 0x52, 0x0a, 0x65, 0x78, 0x74, - 0x72, 0x61, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x06, 0x75, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x09, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x55, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x52, - 0x06, 0x75, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x67, 0x72, 0x6f, 0x75, - 0x70, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, - 0x67, 0x72, 0x6f, 0x75, 0x70, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x19, 0x72, + 0x52, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x76, 0x12, 0x2d, 0x0a, 0x0a, 0x63, + 0x64, 0x69, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x44, 0x49, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x0a, + 0x63, 0x64, 0x69, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x22, 0xf3, 0x02, 0x0a, 0x04, 0x4d, + 0x65, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x76, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x77, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x63, 0x77, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x75, + 0x73, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, + 0x29, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x65, 0x6e, 0x76, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x76, + 0x52, 0x08, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x76, 0x12, 0x2a, 0x0a, 0x0a, 0x65, 0x78, + 0x74, 0x72, 0x61, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, + 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x49, 0x50, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x72, + 0x61, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x06, 0x75, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x09, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x55, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x06, + 0x75, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, + 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x19, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x74, 0x75, 0x62, 0x73, 0x52, 0x65, + 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x74, 0x75, 0x62, 0x73, 0x52, - 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, - 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x74, 0x75, 0x62, 0x73, - 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x26, 0x0a, 0x0e, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x45, 0x78, 0x69, 0x74, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x18, 
0x0c, 0x20, 0x03, 0x28, - 0x05, 0x52, 0x0e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x45, 0x78, 0x69, 0x74, 0x43, 0x6f, 0x64, 0x65, - 0x73, 0x22, 0x2c, 0x0a, 0x06, 0x48, 0x6f, 0x73, 0x74, 0x49, 0x50, 0x12, 0x12, 0x0a, 0x04, 0x48, - 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x48, 0x6f, 0x73, 0x74, 0x12, - 0x0e, 0x0a, 0x02, 0x49, 0x50, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x50, 0x22, - 0x44, 0x0a, 0x06, 0x55, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x53, 0x6f, 0x66, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x53, 0x6f, 0x66, - 0x74, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x04, 0x48, 0x61, 0x72, 0x64, 0x22, 0x4b, 0x0a, 0x09, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x45, - 0x6e, 0x76, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, - 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x61, 0x6c, 0x22, 0xaa, 0x03, 0x0a, 0x05, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x70, - 0x75, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, - 0x0a, 0x04, 0x64, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x65, - 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, - 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, - 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x12, 0x2b, 0x0a, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x4d, - 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x08, 0x54, 0x6d, 0x70, 0x66, 0x73, 0x4f, 0x70, 0x74, 0x18, - 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x6d, 0x70, 0x66, 0x73, - 0x4f, 0x70, 0x74, 0x52, 0x08, 0x54, 0x6d, 0x70, 0x66, 0x73, 0x4f, 0x70, 0x74, 0x12, 0x28, 0x0a, - 0x08, 0x63, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x52, 0x08, 0x63, - 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x12, 0x2b, 0x0a, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x4f, 0x70, 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x62, 0x2e, - 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x52, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x4f, 0x70, 0x74, 0x12, 0x22, 0x0a, 0x06, 0x53, 0x53, 0x48, 0x4f, 0x70, 0x74, 0x18, 0x16, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x53, 0x48, 0x4f, 0x70, 0x74, - 0x52, 0x06, 0x53, 0x53, 0x48, 0x4f, 0x70, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x49, 0x44, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x75, - 
0x6c, 0x74, 0x49, 0x44, 0x12, 0x39, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x43, - 0x61, 0x63, 0x68, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x70, 0x62, 0x2e, - 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x63, 0x68, - 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x22, - 0x1e, 0x0a, 0x08, 0x54, 0x6d, 0x70, 0x66, 0x73, 0x4f, 0x70, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, - 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x22, - 0x49, 0x0a, 0x08, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49, - 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x2d, 0x0a, 0x07, 0x73, - 0x68, 0x61, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x70, - 0x62, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x68, 0x61, 0x72, 0x69, 0x6e, 0x67, 0x4f, 0x70, - 0x74, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x69, 0x6e, 0x67, 0x22, 0x6f, 0x0a, 0x09, 0x53, 0x65, - 0x63, 0x72, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x69, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x67, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6d, - 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, - 0x1a, 0x0a, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0x6c, 0x0a, 0x06, 0x53, - 0x53, 0x48, 0x4f, 0x70, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x69, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x67, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x1a, 0x0a, - 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0x93, 0x01, 0x0a, 0x08, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x4f, 0x70, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x66, 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x2d, 0x0a, 0x05, 0x61, 0x74, 0x74, 0x72, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x4f, 0x70, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, - 0x61, 0x74, 0x74, 0x72, 0x73, 0x1a, 0x38, 0x0a, 0x0a, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, - 0xa9, 0x02, 0x0a, 0x07, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x4f, 0x70, 0x12, 0x18, 0x0a, 0x07, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x65, 
0x72, 0x12, 0x2f, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, - 0x4f, 0x70, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x20, 0x0a, 0x03, 0x64, 0x65, 0x66, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x64, 0x65, 0x66, 0x12, 0x2c, 0x0a, 0x05, 0x61, 0x74, 0x74, 0x72, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x75, 0x69, - 0x6c, 0x64, 0x4f, 0x70, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x05, 0x61, 0x74, 0x74, 0x72, 0x73, 0x1a, 0x49, 0x0a, 0x0b, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x75, 0x69, 0x6c, - 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x1a, 0x38, 0x0a, 0x0a, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x26, 0x0a, 0x0e, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x45, 0x78, 0x69, 0x74, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x05, + 0x52, 0x0e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x45, 0x78, 0x69, 0x74, 0x43, 0x6f, 0x64, 0x65, 0x73, + 0x22, 0x2c, 0x0a, 0x06, 0x48, 0x6f, 0x73, 0x74, 0x49, 0x50, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x6f, + 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x0e, + 0x0a, 0x02, 0x49, 0x50, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x50, 0x22, 0x44, + 0x0a, 0x06, 0x55, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x53, 0x6f, 0x66, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x53, 0x6f, 0x66, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x48, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, + 0x48, 0x61, 0x72, 0x64, 0x22, 0x4b, 0x0a, 0x09, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x45, 0x6e, + 0x76, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, + 0x44, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x22, 0x3b, 0x0a, 0x09, 0x43, 0x44, 0x49, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xaa, + 0x03, 0x0a, 0x05, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x1a, + 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 
0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x65, + 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x65, 0x73, 0x74, 0x12, 0x16, + 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x61, 0x64, 0x6f, 0x6e, + 0x6c, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x6f, 0x6e, + 0x6c, 0x79, 0x12, 0x2b, 0x0a, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x28, 0x0a, 0x08, 0x54, 0x6d, 0x70, 0x66, 0x73, 0x4f, 0x70, 0x74, 0x18, 0x13, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x6d, 0x70, 0x66, 0x73, 0x4f, 0x70, 0x74, 0x52, + 0x08, 0x54, 0x6d, 0x70, 0x66, 0x73, 0x4f, 0x70, 0x74, 0x12, 0x28, 0x0a, 0x08, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x4f, 0x70, 0x74, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, + 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x52, 0x08, 0x63, 0x61, 0x63, 0x68, 0x65, + 0x4f, 0x70, 0x74, 0x12, 0x2b, 0x0a, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4f, 0x70, 0x74, + 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x4f, 0x70, 0x74, 0x52, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4f, 0x70, 0x74, + 0x12, 0x22, 0x0a, 0x06, 0x53, 0x53, 0x48, 0x4f, 0x70, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x53, 0x48, 0x4f, 0x70, 0x74, 0x52, 0x06, 0x53, 0x53, + 0x48, 0x4f, 0x70, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x49, 0x44, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x49, 0x44, + 0x12, 0x39, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, + 0x18, 0x18, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, + 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x0c, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x22, 0x1e, 0x0a, 0x08, 0x54, + 0x6d, 0x70, 0x66, 0x73, 0x4f, 0x70, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x22, 0x49, 0x0a, 0x08, 0x43, + 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x2d, 0x0a, 0x07, 0x73, 0x68, 0x61, 0x72, 0x69, + 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x61, + 0x63, 0x68, 0x65, 0x53, 0x68, 0x61, 0x72, 0x69, 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x52, 0x07, 0x73, + 0x68, 0x61, 0x72, 0x69, 0x6e, 0x67, 0x22, 0x6f, 0x0a, 0x09, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x4f, 0x70, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x49, 0x44, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x03, 0x67, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 
0x08, 0x52, 0x08, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0x6c, 0x0a, 0x06, 0x53, 0x53, 0x48, 0x4f, 0x70, + 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, + 0x44, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, + 0x75, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x03, 0x67, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0x93, 0x01, 0x0a, 0x08, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x4f, 0x70, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x12, 0x2d, 0x0a, 0x05, 0x61, 0x74, 0x74, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x70, 0x2e, + 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x61, 0x74, 0x74, 0x72, + 0x73, 0x1a, 0x38, 0x0a, 0x0a, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x22, 0x0a, 0x0a, 0x42, - 0x75, 0x69, 0x6c, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, - 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x22, - 0x87, 0x03, 0x0a, 0x0a, 0x4f, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x21, - 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x61, 0x63, 0x68, - 0x65, 0x12, 0x41, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x70, 0x62, 0x2e, 0x4f, 0x70, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x63, - 0x61, 0x63, 0x68, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x62, 0x2e, - 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x0b, 0x65, 0x78, 0x70, - 0x6f, 0x72, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x2c, 0x0a, 0x04, 0x63, 0x61, 0x70, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x4f, 0x70, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x61, 0x70, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x04, 0x63, 0x61, 0x70, 0x73, 0x12, 0x38, 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, - 0x73, 0x73, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, - 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x72, 0x6f, 0x75, - 0x70, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x72, 0x6f, 0x75, 0x70, - 
0x1a, 0x3e, 0x0a, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x1a, 0x37, 0x0a, 0x09, 0x43, 0x61, 0x70, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb4, 0x01, 0x0a, 0x06, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, - 0x05, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, - 0x62, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x69, 0x6e, - 0x66, 0x6f, 0x73, 0x1a, 0x4b, 0x0a, 0x0e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0x37, 0x0a, 0x09, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2a, 0x0a, - 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x88, 0x01, 0x0a, 0x0a, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2e, 0x0a, 0x0a, 0x64, 0x65, 0x66, 0x69, - 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, - 0x62, 0x2e, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x64, 0x65, - 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x61, 0x6e, 0x67, - 0x75, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x61, 0x6e, 0x67, - 0x75, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x64, - 0x65, 0x78, 0x12, 0x21, 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x06, 0x72, - 0x61, 0x6e, 0x67, 0x65, 
0x73, 0x22, 0x4b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x22, - 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, - 0x70, 0x62, 0x2e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x12, 0x1e, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x65, - 0x6e, 0x64, 0x22, 0x3c, 0x0a, 0x08, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, - 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6c, 0x69, - 0x6e, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, - 0x22, 0x23, 0x0a, 0x0b, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x47, 0x0a, 0x0d, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, - 0x73, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x77, 0x65, - 0x61, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x22, 0x9f, - 0x01, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x76, 0x12, 0x1d, 0x0a, 0x0a, 0x68, - 0x74, 0x74, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x68, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x74, - 0x74, 0x70, 0x73, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x68, 0x74, 0x74, 0x70, 0x73, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x66, - 0x74, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x66, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x6f, 0x5f, 0x70, - 0x72, 0x6f, 0x78, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x6f, 0x50, 0x72, - 0x6f, 0x78, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x6c, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x6c, 0x6c, 0x50, 0x72, 0x6f, 0x78, 0x79, - 0x22, 0x2b, 0x0a, 0x11, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, - 0x61, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xc9, 0x01, - 0x0a, 0x0a, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, - 0x64, 0x65, 0x66, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x03, 0x64, 0x65, 0x66, 0x12, 0x38, - 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, - 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x06, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x52, 0x06, 0x53, 0x6f, 
0x75, 0x72, 0x63, 0x65, 0x1a, 0x4b, 0x0a, 0x0d, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, - 0x2e, 0x70, 0x62, 0x2e, 0x4f, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x32, 0x0a, 0x06, 0x46, 0x69, 0x6c, - 0x65, 0x4f, 0x70, 0x12, 0x28, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xca, 0x02, - 0x0a, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x70, - 0x75, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x49, - 0x6e, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x6f, - 0x6e, 0x64, 0x61, 0x72, 0x79, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, - 0x74, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, - 0x75, 0x74, 0x12, 0x28, 0x0a, 0x04, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x70, 0x79, 0x48, 0x00, 0x52, 0x04, 0x63, 0x6f, 0x70, 0x79, 0x12, 0x2e, 0x0a, 0x06, - 0x6d, 0x6b, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, - 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6b, 0x46, 0x69, - 0x6c, 0x65, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x6b, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x2b, 0x0a, 0x05, - 0x6d, 0x6b, 0x64, 0x69, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x62, - 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6b, 0x44, 0x69, 0x72, - 0x48, 0x00, 0x52, 0x05, 0x6d, 0x6b, 0x64, 0x69, 0x72, 0x12, 0x22, 0x0a, 0x02, 0x72, 0x6d, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x6d, 0x48, 0x00, 0x52, 0x02, 0x72, 0x6d, 0x12, 0x31, 0x0a, - 0x07, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, - 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x48, 0x00, 0x52, 0x07, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, - 0x42, 0x08, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xde, 0x04, 0x0a, 0x0e, 0x46, - 0x69, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x73, 0x72, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x72, 0x63, 0x12, - 0x12, 0x0a, 0x04, 0x64, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, - 0x65, 0x73, 0x74, 0x12, 0x22, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x68, 0x6f, 0x77, 0x6e, 0x4f, 0x70, 0x74, - 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 
0x24, 0x0a, 0x0d, 0x66, - 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x53, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0d, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x53, 0x79, 0x6d, 0x6c, 0x69, 0x6e, - 0x6b, 0x12, 0x28, 0x0a, 0x0f, 0x64, 0x69, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x43, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x69, 0x72, 0x43, - 0x6f, 0x70, 0x79, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x4a, 0x0a, 0x20, 0x61, - 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x55, 0x6e, 0x70, 0x61, 0x63, 0x6b, 0x44, 0x6f, 0x63, 0x6b, - 0x65, 0x72, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x20, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x55, 0x6e, - 0x70, 0x61, 0x63, 0x6b, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, - 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x44, 0x65, 0x73, 0x74, 0x50, 0x61, 0x74, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x65, 0x73, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, - 0x24, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x57, 0x69, 0x6c, 0x64, 0x63, 0x61, 0x72, 0x64, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x57, 0x69, 0x6c, - 0x64, 0x63, 0x61, 0x72, 0x64, 0x12, 0x2e, 0x0a, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x57, 0x69, 0x6c, 0x64, 0x63, 0x61, 0x72, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x57, 0x69, 0x6c, - 0x64, 0x63, 0x61, 0x72, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, - 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x69, - 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x12, 0x29, - 0x0a, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, - 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, - 0x65, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x1e, 0x61, 0x6c, 0x77, - 0x61, 0x79, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x69, - 0x6e, 0x67, 0x44, 0x65, 0x73, 0x74, 0x50, 0x61, 0x74, 0x68, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x1e, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, - 0x45, 0x78, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x73, 0x74, 0x50, 0x61, 0x74, 0x68, - 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x72, 0x18, 0x0f, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x6d, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x72, 0x22, 0x90, 0x01, 0x0a, 0x10, - 0x46, 0x69, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6b, 0x46, 0x69, 0x6c, 0x65, - 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x05, - 
0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, - 0x2e, 0x43, 0x68, 0x6f, 0x77, 0x6e, 0x4f, 0x70, 0x74, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, - 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x89, - 0x01, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x6d, - 0x6c, 0x69, 0x6e, 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x12, 0x18, - 0x0a, 0x07, 0x6e, 0x65, 0x77, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6e, 0x65, 0x77, 0x70, 0x61, 0x74, 0x68, 0x12, 0x22, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, - 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x68, 0x6f, - 0x77, 0x6e, 0x4f, 0x70, 0x74, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x9d, 0x01, 0x0a, 0x0f, 0x46, - 0x69, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6b, 0x44, 0x69, 0x72, 0x12, 0x12, - 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x6d, 0x61, 0x6b, 0x65, 0x50, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6d, 0x61, 0x6b, - 0x65, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x22, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa9, 0x02, 0x0a, 0x07, + 0x42, 0x75, 0x69, 0x6c, 0x64, 0x4f, 0x70, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, + 0x72, 0x12, 0x2f, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x4f, 0x70, 0x2e, 0x49, + 0x6e, 0x70, 0x75, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x73, 0x12, 0x20, 0x0a, 0x03, 0x64, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x03, 0x64, 0x65, 0x66, 0x12, 0x2c, 0x0a, 0x05, 0x61, 0x74, 0x74, 0x72, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x4f, 0x70, + 0x2e, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x61, 0x74, 0x74, + 0x72, 0x73, 0x1a, 0x49, 0x0a, 0x0b, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x6e, 0x70, + 0x75, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x38, 0x0a, + 0x0a, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x75, 0x69, 0x6c, 0x64, + 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x87, 0x03, 0x0a, 0x0a, + 0x4f, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x41, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x70, 0x62, 0x2e, 0x4f, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x32, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x78, 0x70, 0x6f, + 0x72, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x0b, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x43, + 0x61, 0x63, 0x68, 0x65, 0x12, 0x2c, 0x0a, 0x04, 0x63, 0x61, 0x70, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x4f, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x43, 0x61, 0x70, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x63, 0x61, + 0x70, 0x73, 0x12, 0x38, 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x62, 0x2e, + 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x0d, 0x70, + 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x1a, 0x3e, 0x0a, 0x10, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x37, 0x0a, 0x09, + 0x43, 0x61, 0x70, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb4, 0x01, 0x0a, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x05, 0x69, 0x6e, 0x66, + 0x6f, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x1a, + 0x4b, 0x0a, 0x0e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x37, 0x0a, 0x09, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2a, 0x0a, 0x09, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, + 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x88, 0x01, 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x12, 0x2e, 0x0a, 0x0a, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x65, + 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x22, 0x4f, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x21, + 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, + 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x73, 0x22, 0x4b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x22, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x50, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x1e, + 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, + 0x2e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x3c, + 0x0a, 0x08, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, + 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1c, + 0x0a, 0x09, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x09, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x22, 0x23, 0x0a, 0x0b, + 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0x47, 0x0a, 0x0d, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x77, 0x65, 
0x61, 0x6b, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x22, 0x9f, 0x01, 0x0a, 0x08, 0x50, + 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x76, 0x12, 0x1d, 0x0a, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x5f, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x68, 0x74, 0x74, + 0x70, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x73, 0x5f, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x74, 0x74, + 0x70, 0x73, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x74, 0x70, 0x5f, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x74, 0x70, 0x50, + 0x72, 0x6f, 0x78, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x6f, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x12, + 0x1b, 0x0a, 0x09, 0x61, 0x6c, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x61, 0x6c, 0x6c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x22, 0x2b, 0x0a, 0x11, + 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, + 0x73, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xc9, 0x01, 0x0a, 0x0a, 0x44, 0x65, + 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x65, 0x66, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x03, 0x64, 0x65, 0x66, 0x12, 0x38, 0x0a, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, + 0x62, 0x2e, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x52, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x4b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, + 0x4f, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x32, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x12, + 0x28, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xca, 0x02, 0x0a, 0x0a, 0x46, 0x69, + 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x26, + 0x0a, 0x0e, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x49, 0x6e, 0x70, 0x75, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, + 0x79, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x28, + 
0x0a, 0x04, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, + 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x70, 0x79, + 0x48, 0x00, 0x52, 0x04, 0x63, 0x6f, 0x70, 0x79, 0x12, 0x2e, 0x0a, 0x06, 0x6d, 0x6b, 0x66, 0x69, + 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x46, 0x69, + 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6b, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x00, + 0x52, 0x06, 0x6d, 0x6b, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x6d, 0x6b, 0x64, 0x69, + 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, + 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6b, 0x44, 0x69, 0x72, 0x48, 0x00, 0x52, 0x05, + 0x6d, 0x6b, 0x64, 0x69, 0x72, 0x12, 0x22, 0x0a, 0x02, 0x72, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x6d, 0x48, 0x00, 0x52, 0x02, 0x72, 0x6d, 0x12, 0x31, 0x0a, 0x07, 0x73, 0x79, 0x6d, + 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x62, 0x2e, + 0x46, 0x69, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x6d, 0x6c, 0x69, 0x6e, + 0x6b, 0x48, 0x00, 0x52, 0x07, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x42, 0x08, 0x0a, 0x06, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xde, 0x04, 0x0a, 0x0e, 0x46, 0x69, 0x6c, 0x65, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x72, 0x63, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x72, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x64, + 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x65, 0x73, 0x74, 0x12, + 0x22, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, + 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x68, 0x6f, 0x77, 0x6e, 0x4f, 0x70, 0x74, 0x52, 0x05, 0x6f, 0x77, + 0x6e, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, + 0x77, 0x53, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, + 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x53, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x12, 0x28, 0x0a, + 0x0f, 0x64, 0x69, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x69, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x4a, 0x0a, 0x20, 0x61, 0x74, 0x74, 0x65, 0x6d, + 0x70, 0x74, 0x55, 0x6e, 0x70, 0x61, 0x63, 0x6b, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, + 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x20, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x55, 0x6e, 0x70, 0x61, 0x63, 0x6b, + 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x79, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x65, 0x73, + 0x74, 0x50, 0x61, 0x74, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x44, 0x65, 0x73, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x24, 0x0a, 0x0d, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x57, 0x69, 0x6c, 0x64, 0x63, 0x61, 0x72, 0x64, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x57, 0x69, 0x6c, 0x64, 0x63, 0x61, 0x72, + 0x64, 0x12, 0x2e, 0x0a, 
0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x57, + 0x69, 0x6c, 0x64, 0x63, 0x61, 0x72, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x57, 0x69, 0x6c, 0x64, 0x63, 0x61, 0x72, + 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x29, 0x0a, 0x10, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, + 0x72, 0x6e, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, + 0x64, 0x65, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, + 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x0d, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x61, 0x74, + 0x74, 0x65, 0x72, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x1e, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x52, + 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x44, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x74, 0x68, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1e, 0x61, + 0x6c, 0x77, 0x61, 0x79, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x45, 0x78, 0x69, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x73, 0x74, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x18, 0x0a, + 0x07, 0x6d, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6d, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x72, 0x22, 0x90, 0x01, 0x0a, 0x10, 0x46, 0x69, 0x6c, 0x65, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6b, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, + 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x68, 0x6f, 0x77, 0x6e, 0x4f, 0x70, 0x74, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x6e, 0x0a, 0x0c, 0x46, 0x69, - 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x24, - 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x74, 0x46, - 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x57, 0x69, 0x6c, - 0x64, 0x63, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, - 0x6f, 0x77, 0x57, 0x69, 0x6c, 0x64, 0x63, 0x61, 0x72, 0x64, 0x22, 0x4e, 0x0a, 0x08, 0x43, 0x68, - 0x6f, 0x77, 0x6e, 0x4f, 0x70, 0x74, 0x12, 0x1f, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x62, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4f, 0x70, - 0x74, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 
0x70, 0x62, 0x2e, 0x55, 0x73, 0x65, 0x72, - 0x4f, 0x70, 0x74, 0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x53, 0x0a, 0x07, 0x55, 0x73, - 0x65, 0x72, 0x4f, 0x70, 0x74, 0x12, 0x2a, 0x0a, 0x06, 0x62, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, - 0x55, 0x73, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x48, 0x00, 0x52, 0x06, 0x62, 0x79, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x14, 0x0a, 0x04, 0x62, 0x79, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x48, - 0x00, 0x52, 0x04, 0x62, 0x79, 0x49, 0x44, 0x42, 0x06, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x22, - 0x38, 0x0a, 0x0c, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x22, 0x0a, 0x0a, 0x4d, 0x65, 0x72, - 0x67, 0x65, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x31, 0x0a, - 0x07, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x4f, 0x70, 0x12, 0x26, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, - 0x72, 0x67, 0x65, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, - 0x22, 0x26, 0x0a, 0x0e, 0x4c, 0x6f, 0x77, 0x65, 0x72, 0x44, 0x69, 0x66, 0x66, 0x49, 0x6e, 0x70, - 0x75, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x26, 0x0a, 0x0e, 0x55, 0x70, 0x70, 0x65, - 0x72, 0x44, 0x69, 0x66, 0x66, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, - 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, - 0x22, 0x5c, 0x0a, 0x06, 0x44, 0x69, 0x66, 0x66, 0x4f, 0x70, 0x12, 0x28, 0x0a, 0x05, 0x6c, 0x6f, - 0x77, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x4c, - 0x6f, 0x77, 0x65, 0x72, 0x44, 0x69, 0x66, 0x66, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x05, 0x6c, - 0x6f, 0x77, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x05, 0x75, 0x70, 0x70, 0x65, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x55, 0x70, 0x70, 0x65, 0x72, 0x44, 0x69, - 0x66, 0x66, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x05, 0x75, 0x70, 0x70, 0x65, 0x72, 0x2a, 0x28, - 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x4e, 0x53, - 0x45, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x4f, 0x53, 0x54, 0x10, 0x01, 0x12, 0x08, - 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x02, 0x2a, 0x29, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x75, - 0x72, 0x69, 0x74, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x41, 0x4e, 0x44, - 0x42, 0x4f, 0x58, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x53, 0x45, 0x43, 0x55, 0x52, - 0x45, 0x10, 0x01, 0x2a, 0x40, 0x0a, 0x09, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x08, 0x0a, 0x04, 0x42, 0x49, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x45, - 0x43, 0x52, 0x45, 0x54, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x53, 0x48, 0x10, 0x02, 0x12, - 0x09, 0x0a, 0x05, 0x43, 0x41, 0x43, 0x48, 0x45, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x4d, - 0x50, 0x46, 0x53, 0x10, 0x04, 0x2a, 0x31, 0x0a, 0x11, 0x4d, 0x6f, 0x75, 0x6e, 
0x74, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, - 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x4f, 0x4e, 0x10, 0x01, 0x12, - 0x07, 0x0a, 0x03, 0x4f, 0x46, 0x46, 0x10, 0x02, 0x2a, 0x36, 0x0a, 0x0f, 0x43, 0x61, 0x63, 0x68, - 0x65, 0x53, 0x68, 0x61, 0x72, 0x69, 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x12, 0x0a, 0x0a, 0x06, 0x53, - 0x48, 0x41, 0x52, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x49, 0x56, 0x41, - 0x54, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x4f, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x02, - 0x42, 0x24, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, - 0x6f, 0x62, 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x73, 0x6f, 0x6c, - 0x76, 0x65, 0x72, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x89, 0x01, 0x0a, 0x11, 0x46, + 0x69, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, + 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x6f, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, + 0x77, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x77, + 0x70, 0x61, 0x74, 0x68, 0x12, 0x22, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x68, 0x6f, 0x77, 0x6e, 0x4f, 0x70, + 0x74, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x9d, 0x01, 0x0a, 0x0f, 0x46, 0x69, 0x6c, 0x65, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6b, 0x44, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, + 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6d, 0x6f, + 0x64, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x6d, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6d, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x22, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x68, 0x6f, 0x77, 0x6e, 0x4f, 0x70, + 0x74, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x6e, 0x0a, 0x0c, 0x46, 0x69, 0x6c, 0x65, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x24, 0x0a, 0x0d, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, + 0x12, 0x24, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x57, 0x69, 0x6c, 0x64, 0x63, 0x61, 0x72, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x57, 0x69, + 0x6c, 0x64, 0x63, 0x61, 0x72, 0x64, 0x22, 0x4e, 0x0a, 0x08, 0x43, 0x68, 0x6f, 0x77, 0x6e, 0x4f, + 0x70, 0x74, 0x12, 
0x1f, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0b, 0x2e, 0x70, 0x62, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x52, 0x04, 0x75, + 0x73, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x62, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x52, + 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x53, 0x0a, 0x07, 0x55, 0x73, 0x65, 0x72, 0x4f, 0x70, + 0x74, 0x12, 0x2a, 0x0a, 0x06, 0x62, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, + 0x4f, 0x70, 0x74, 0x48, 0x00, 0x52, 0x06, 0x62, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, + 0x04, 0x62, 0x79, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x04, 0x62, + 0x79, 0x49, 0x44, 0x42, 0x06, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x22, 0x38, 0x0a, 0x0c, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x22, 0x0a, 0x0a, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x31, 0x0a, 0x07, 0x4d, 0x65, 0x72, + 0x67, 0x65, 0x4f, 0x70, 0x12, 0x26, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x49, + 0x6e, 0x70, 0x75, 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x22, 0x26, 0x0a, 0x0e, + 0x4c, 0x6f, 0x77, 0x65, 0x72, 0x44, 0x69, 0x66, 0x66, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, + 0x6e, 0x70, 0x75, 0x74, 0x22, 0x26, 0x0a, 0x0e, 0x55, 0x70, 0x70, 0x65, 0x72, 0x44, 0x69, 0x66, + 0x66, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x5c, 0x0a, 0x06, + 0x44, 0x69, 0x66, 0x66, 0x4f, 0x70, 0x12, 0x28, 0x0a, 0x05, 0x6c, 0x6f, 0x77, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x77, 0x65, 0x72, + 0x44, 0x69, 0x66, 0x66, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x05, 0x6c, 0x6f, 0x77, 0x65, 0x72, + 0x12, 0x28, 0x0a, 0x05, 0x75, 0x70, 0x70, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x55, 0x70, 0x70, 0x65, 0x72, 0x44, 0x69, 0x66, 0x66, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x52, 0x05, 0x75, 0x70, 0x70, 0x65, 0x72, 0x2a, 0x28, 0x0a, 0x07, 0x4e, 0x65, + 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x10, 0x00, + 0x12, 0x08, 0x0a, 0x04, 0x48, 0x4f, 0x53, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, + 0x4e, 0x45, 0x10, 0x02, 0x2a, 0x29, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x41, 0x4e, 0x44, 0x42, 0x4f, 0x58, 0x10, + 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x53, 0x45, 0x43, 0x55, 0x52, 0x45, 0x10, 0x01, 0x2a, + 0x40, 0x0a, 0x09, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, + 0x42, 0x49, 0x4e, 0x44, 0x10, 0x00, 0x12, 
0x0a, 0x0a, 0x06, 0x53, 0x45, 0x43, 0x52, 0x45, 0x54, + 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x53, 0x48, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x43, + 0x41, 0x43, 0x48, 0x45, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x4d, 0x50, 0x46, 0x53, 0x10, + 0x04, 0x2a, 0x31, 0x0a, 0x11, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, + 0x54, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x4f, + 0x46, 0x46, 0x10, 0x02, 0x2a, 0x36, 0x0a, 0x0f, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x68, 0x61, + 0x72, 0x69, 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x48, 0x41, 0x52, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x49, 0x56, 0x41, 0x54, 0x45, 0x10, 0x01, + 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x4f, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x02, 0x42, 0x24, 0x5a, 0x22, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, 0x2f, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x2f, + 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3756,7 +3828,7 @@ func file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP() []byte { } var file_github_com_moby_buildkit_solver_pb_ops_proto_enumTypes = make([]protoimpl.EnumInfo, 5) -var file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes = make([]protoimpl.MessageInfo, 50) +var file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes = make([]protoimpl.MessageInfo, 51) var file_github_com_moby_buildkit_solver_pb_ops_proto_goTypes = []any{ (NetMode)(0), // 0: pb.NetMode (SecurityMode)(0), // 1: pb.SecurityMode @@ -3771,115 +3843,117 @@ var file_github_com_moby_buildkit_solver_pb_ops_proto_goTypes = []any{ (*HostIP)(nil), // 10: pb.HostIP (*Ulimit)(nil), // 11: pb.Ulimit (*SecretEnv)(nil), // 12: pb.SecretEnv - (*Mount)(nil), // 13: pb.Mount - (*TmpfsOpt)(nil), // 14: pb.TmpfsOpt - (*CacheOpt)(nil), // 15: pb.CacheOpt - (*SecretOpt)(nil), // 16: pb.SecretOpt - (*SSHOpt)(nil), // 17: pb.SSHOpt - (*SourceOp)(nil), // 18: pb.SourceOp - (*BuildOp)(nil), // 19: pb.BuildOp - (*BuildInput)(nil), // 20: pb.BuildInput - (*OpMetadata)(nil), // 21: pb.OpMetadata - (*Source)(nil), // 22: pb.Source - (*Locations)(nil), // 23: pb.Locations - (*SourceInfo)(nil), // 24: pb.SourceInfo - (*Location)(nil), // 25: pb.Location - (*Range)(nil), // 26: pb.Range - (*Position)(nil), // 27: pb.Position - (*ExportCache)(nil), // 28: pb.ExportCache - (*ProgressGroup)(nil), // 29: pb.ProgressGroup - (*ProxyEnv)(nil), // 30: pb.ProxyEnv - (*WorkerConstraints)(nil), // 31: pb.WorkerConstraints - (*Definition)(nil), // 32: pb.Definition - (*FileOp)(nil), // 33: pb.FileOp - (*FileAction)(nil), // 34: pb.FileAction - (*FileActionCopy)(nil), // 35: pb.FileActionCopy - (*FileActionMkFile)(nil), // 36: pb.FileActionMkFile - (*FileActionSymlink)(nil), // 37: pb.FileActionSymlink - (*FileActionMkDir)(nil), // 38: pb.FileActionMkDir - (*FileActionRm)(nil), // 39: pb.FileActionRm - (*ChownOpt)(nil), // 40: pb.ChownOpt - (*UserOpt)(nil), // 41: pb.UserOpt - (*NamedUserOpt)(nil), // 42: pb.NamedUserOpt - (*MergeInput)(nil), // 43: pb.MergeInput - (*MergeOp)(nil), // 44: pb.MergeOp - (*LowerDiffInput)(nil), // 45: pb.LowerDiffInput - (*UpperDiffInput)(nil), // 46: pb.UpperDiffInput - (*DiffOp)(nil), // 47: pb.DiffOp - nil, // 48: pb.SourceOp.AttrsEntry - nil, // 49: pb.BuildOp.InputsEntry - nil, // 50: pb.BuildOp.AttrsEntry - nil, 
// 51: pb.OpMetadata.DescriptionEntry - nil, // 52: pb.OpMetadata.CapsEntry - nil, // 53: pb.Source.LocationsEntry - nil, // 54: pb.Definition.MetadataEntry + (*CDIDevice)(nil), // 13: pb.CDIDevice + (*Mount)(nil), // 14: pb.Mount + (*TmpfsOpt)(nil), // 15: pb.TmpfsOpt + (*CacheOpt)(nil), // 16: pb.CacheOpt + (*SecretOpt)(nil), // 17: pb.SecretOpt + (*SSHOpt)(nil), // 18: pb.SSHOpt + (*SourceOp)(nil), // 19: pb.SourceOp + (*BuildOp)(nil), // 20: pb.BuildOp + (*BuildInput)(nil), // 21: pb.BuildInput + (*OpMetadata)(nil), // 22: pb.OpMetadata + (*Source)(nil), // 23: pb.Source + (*Locations)(nil), // 24: pb.Locations + (*SourceInfo)(nil), // 25: pb.SourceInfo + (*Location)(nil), // 26: pb.Location + (*Range)(nil), // 27: pb.Range + (*Position)(nil), // 28: pb.Position + (*ExportCache)(nil), // 29: pb.ExportCache + (*ProgressGroup)(nil), // 30: pb.ProgressGroup + (*ProxyEnv)(nil), // 31: pb.ProxyEnv + (*WorkerConstraints)(nil), // 32: pb.WorkerConstraints + (*Definition)(nil), // 33: pb.Definition + (*FileOp)(nil), // 34: pb.FileOp + (*FileAction)(nil), // 35: pb.FileAction + (*FileActionCopy)(nil), // 36: pb.FileActionCopy + (*FileActionMkFile)(nil), // 37: pb.FileActionMkFile + (*FileActionSymlink)(nil), // 38: pb.FileActionSymlink + (*FileActionMkDir)(nil), // 39: pb.FileActionMkDir + (*FileActionRm)(nil), // 40: pb.FileActionRm + (*ChownOpt)(nil), // 41: pb.ChownOpt + (*UserOpt)(nil), // 42: pb.UserOpt + (*NamedUserOpt)(nil), // 43: pb.NamedUserOpt + (*MergeInput)(nil), // 44: pb.MergeInput + (*MergeOp)(nil), // 45: pb.MergeOp + (*LowerDiffInput)(nil), // 46: pb.LowerDiffInput + (*UpperDiffInput)(nil), // 47: pb.UpperDiffInput + (*DiffOp)(nil), // 48: pb.DiffOp + nil, // 49: pb.SourceOp.AttrsEntry + nil, // 50: pb.BuildOp.InputsEntry + nil, // 51: pb.BuildOp.AttrsEntry + nil, // 52: pb.OpMetadata.DescriptionEntry + nil, // 53: pb.OpMetadata.CapsEntry + nil, // 54: pb.Source.LocationsEntry + nil, // 55: pb.Definition.MetadataEntry } var file_github_com_moby_buildkit_solver_pb_ops_proto_depIdxs = []int32{ 7, // 0: pb.Op.inputs:type_name -> pb.Input 8, // 1: pb.Op.exec:type_name -> pb.ExecOp - 18, // 2: pb.Op.source:type_name -> pb.SourceOp - 33, // 3: pb.Op.file:type_name -> pb.FileOp - 19, // 4: pb.Op.build:type_name -> pb.BuildOp - 44, // 5: pb.Op.merge:type_name -> pb.MergeOp - 47, // 6: pb.Op.diff:type_name -> pb.DiffOp + 19, // 2: pb.Op.source:type_name -> pb.SourceOp + 34, // 3: pb.Op.file:type_name -> pb.FileOp + 20, // 4: pb.Op.build:type_name -> pb.BuildOp + 45, // 5: pb.Op.merge:type_name -> pb.MergeOp + 48, // 6: pb.Op.diff:type_name -> pb.DiffOp 6, // 7: pb.Op.platform:type_name -> pb.Platform - 31, // 8: pb.Op.constraints:type_name -> pb.WorkerConstraints + 32, // 8: pb.Op.constraints:type_name -> pb.WorkerConstraints 9, // 9: pb.ExecOp.meta:type_name -> pb.Meta - 13, // 10: pb.ExecOp.mounts:type_name -> pb.Mount + 14, // 10: pb.ExecOp.mounts:type_name -> pb.Mount 0, // 11: pb.ExecOp.network:type_name -> pb.NetMode 1, // 12: pb.ExecOp.security:type_name -> pb.SecurityMode 12, // 13: pb.ExecOp.secretenv:type_name -> pb.SecretEnv - 30, // 14: pb.Meta.proxy_env:type_name -> pb.ProxyEnv - 10, // 15: pb.Meta.extraHosts:type_name -> pb.HostIP - 11, // 16: pb.Meta.ulimit:type_name -> pb.Ulimit - 2, // 17: pb.Mount.mountType:type_name -> pb.MountType - 14, // 18: pb.Mount.TmpfsOpt:type_name -> pb.TmpfsOpt - 15, // 19: pb.Mount.cacheOpt:type_name -> pb.CacheOpt - 16, // 20: pb.Mount.secretOpt:type_name -> pb.SecretOpt - 17, // 21: pb.Mount.SSHOpt:type_name -> pb.SSHOpt - 3, // 22: 
pb.Mount.contentCache:type_name -> pb.MountContentCache - 4, // 23: pb.CacheOpt.sharing:type_name -> pb.CacheSharingOpt - 48, // 24: pb.SourceOp.attrs:type_name -> pb.SourceOp.AttrsEntry - 49, // 25: pb.BuildOp.inputs:type_name -> pb.BuildOp.InputsEntry - 32, // 26: pb.BuildOp.def:type_name -> pb.Definition - 50, // 27: pb.BuildOp.attrs:type_name -> pb.BuildOp.AttrsEntry - 51, // 28: pb.OpMetadata.description:type_name -> pb.OpMetadata.DescriptionEntry - 28, // 29: pb.OpMetadata.export_cache:type_name -> pb.ExportCache - 52, // 30: pb.OpMetadata.caps:type_name -> pb.OpMetadata.CapsEntry - 29, // 31: pb.OpMetadata.progress_group:type_name -> pb.ProgressGroup - 53, // 32: pb.Source.locations:type_name -> pb.Source.LocationsEntry - 24, // 33: pb.Source.infos:type_name -> pb.SourceInfo - 25, // 34: pb.Locations.locations:type_name -> pb.Location - 32, // 35: pb.SourceInfo.definition:type_name -> pb.Definition - 26, // 36: pb.Location.ranges:type_name -> pb.Range - 27, // 37: pb.Range.start:type_name -> pb.Position - 27, // 38: pb.Range.end:type_name -> pb.Position - 54, // 39: pb.Definition.metadata:type_name -> pb.Definition.MetadataEntry - 22, // 40: pb.Definition.Source:type_name -> pb.Source - 34, // 41: pb.FileOp.actions:type_name -> pb.FileAction - 35, // 42: pb.FileAction.copy:type_name -> pb.FileActionCopy - 36, // 43: pb.FileAction.mkfile:type_name -> pb.FileActionMkFile - 38, // 44: pb.FileAction.mkdir:type_name -> pb.FileActionMkDir - 39, // 45: pb.FileAction.rm:type_name -> pb.FileActionRm - 37, // 46: pb.FileAction.symlink:type_name -> pb.FileActionSymlink - 40, // 47: pb.FileActionCopy.owner:type_name -> pb.ChownOpt - 40, // 48: pb.FileActionMkFile.owner:type_name -> pb.ChownOpt - 40, // 49: pb.FileActionSymlink.owner:type_name -> pb.ChownOpt - 40, // 50: pb.FileActionMkDir.owner:type_name -> pb.ChownOpt - 41, // 51: pb.ChownOpt.user:type_name -> pb.UserOpt - 41, // 52: pb.ChownOpt.group:type_name -> pb.UserOpt - 42, // 53: pb.UserOpt.byName:type_name -> pb.NamedUserOpt - 43, // 54: pb.MergeOp.inputs:type_name -> pb.MergeInput - 45, // 55: pb.DiffOp.lower:type_name -> pb.LowerDiffInput - 46, // 56: pb.DiffOp.upper:type_name -> pb.UpperDiffInput - 20, // 57: pb.BuildOp.InputsEntry.value:type_name -> pb.BuildInput - 23, // 58: pb.Source.LocationsEntry.value:type_name -> pb.Locations - 21, // 59: pb.Definition.MetadataEntry.value:type_name -> pb.OpMetadata - 60, // [60:60] is the sub-list for method output_type - 60, // [60:60] is the sub-list for method input_type - 60, // [60:60] is the sub-list for extension type_name - 60, // [60:60] is the sub-list for extension extendee - 0, // [0:60] is the sub-list for field type_name + 13, // 14: pb.ExecOp.cdiDevices:type_name -> pb.CDIDevice + 31, // 15: pb.Meta.proxy_env:type_name -> pb.ProxyEnv + 10, // 16: pb.Meta.extraHosts:type_name -> pb.HostIP + 11, // 17: pb.Meta.ulimit:type_name -> pb.Ulimit + 2, // 18: pb.Mount.mountType:type_name -> pb.MountType + 15, // 19: pb.Mount.TmpfsOpt:type_name -> pb.TmpfsOpt + 16, // 20: pb.Mount.cacheOpt:type_name -> pb.CacheOpt + 17, // 21: pb.Mount.secretOpt:type_name -> pb.SecretOpt + 18, // 22: pb.Mount.SSHOpt:type_name -> pb.SSHOpt + 3, // 23: pb.Mount.contentCache:type_name -> pb.MountContentCache + 4, // 24: pb.CacheOpt.sharing:type_name -> pb.CacheSharingOpt + 49, // 25: pb.SourceOp.attrs:type_name -> pb.SourceOp.AttrsEntry + 50, // 26: pb.BuildOp.inputs:type_name -> pb.BuildOp.InputsEntry + 33, // 27: pb.BuildOp.def:type_name -> pb.Definition + 51, // 28: pb.BuildOp.attrs:type_name -> 
pb.BuildOp.AttrsEntry + 52, // 29: pb.OpMetadata.description:type_name -> pb.OpMetadata.DescriptionEntry + 29, // 30: pb.OpMetadata.export_cache:type_name -> pb.ExportCache + 53, // 31: pb.OpMetadata.caps:type_name -> pb.OpMetadata.CapsEntry + 30, // 32: pb.OpMetadata.progress_group:type_name -> pb.ProgressGroup + 54, // 33: pb.Source.locations:type_name -> pb.Source.LocationsEntry + 25, // 34: pb.Source.infos:type_name -> pb.SourceInfo + 26, // 35: pb.Locations.locations:type_name -> pb.Location + 33, // 36: pb.SourceInfo.definition:type_name -> pb.Definition + 27, // 37: pb.Location.ranges:type_name -> pb.Range + 28, // 38: pb.Range.start:type_name -> pb.Position + 28, // 39: pb.Range.end:type_name -> pb.Position + 55, // 40: pb.Definition.metadata:type_name -> pb.Definition.MetadataEntry + 23, // 41: pb.Definition.Source:type_name -> pb.Source + 35, // 42: pb.FileOp.actions:type_name -> pb.FileAction + 36, // 43: pb.FileAction.copy:type_name -> pb.FileActionCopy + 37, // 44: pb.FileAction.mkfile:type_name -> pb.FileActionMkFile + 39, // 45: pb.FileAction.mkdir:type_name -> pb.FileActionMkDir + 40, // 46: pb.FileAction.rm:type_name -> pb.FileActionRm + 38, // 47: pb.FileAction.symlink:type_name -> pb.FileActionSymlink + 41, // 48: pb.FileActionCopy.owner:type_name -> pb.ChownOpt + 41, // 49: pb.FileActionMkFile.owner:type_name -> pb.ChownOpt + 41, // 50: pb.FileActionSymlink.owner:type_name -> pb.ChownOpt + 41, // 51: pb.FileActionMkDir.owner:type_name -> pb.ChownOpt + 42, // 52: pb.ChownOpt.user:type_name -> pb.UserOpt + 42, // 53: pb.ChownOpt.group:type_name -> pb.UserOpt + 43, // 54: pb.UserOpt.byName:type_name -> pb.NamedUserOpt + 44, // 55: pb.MergeOp.inputs:type_name -> pb.MergeInput + 46, // 56: pb.DiffOp.lower:type_name -> pb.LowerDiffInput + 47, // 57: pb.DiffOp.upper:type_name -> pb.UpperDiffInput + 21, // 58: pb.BuildOp.InputsEntry.value:type_name -> pb.BuildInput + 24, // 59: pb.Source.LocationsEntry.value:type_name -> pb.Locations + 22, // 60: pb.Definition.MetadataEntry.value:type_name -> pb.OpMetadata + 61, // [61:61] is the sub-list for method output_type + 61, // [61:61] is the sub-list for method input_type + 61, // [61:61] is the sub-list for extension type_name + 61, // [61:61] is the sub-list for extension extendee + 0, // [0:61] is the sub-list for field type_name } func init() { file_github_com_moby_buildkit_solver_pb_ops_proto_init() } @@ -3895,14 +3969,14 @@ func file_github_com_moby_buildkit_solver_pb_ops_proto_init() { (*Op_Merge)(nil), (*Op_Diff)(nil), } - file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[29].OneofWrappers = []any{ + file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[30].OneofWrappers = []any{ (*FileAction_Copy)(nil), (*FileAction_Mkfile)(nil), (*FileAction_Mkdir)(nil), (*FileAction_Rm)(nil), (*FileAction_Symlink)(nil), } - file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[36].OneofWrappers = []any{ + file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes[37].OneofWrappers = []any{ (*UserOpt_ByName)(nil), (*UserOpt_ByID)(nil), } @@ -3912,7 +3986,7 @@ func file_github_com_moby_buildkit_solver_pb_ops_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_github_com_moby_buildkit_solver_pb_ops_proto_rawDesc, NumEnums: 5, - NumMessages: 50, + NumMessages: 51, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.proto b/vendor/github.com/moby/buildkit/solver/pb/ops.proto index 5ff388299c..90b69560e0 100644 --- 
a/vendor/github.com/moby/buildkit/solver/pb/ops.proto +++ b/vendor/github.com/moby/buildkit/solver/pb/ops.proto @@ -47,6 +47,7 @@ message ExecOp { NetMode network = 3; SecurityMode security = 4; repeated SecretEnv secretenv = 5; + repeated CDIDevice cdiDevices = 6; } // Meta is a set of arguments for ExecOp. @@ -95,6 +96,15 @@ message SecretEnv { bool optional = 3; } +// CDIDevice specifies a CDI device information. +message CDIDevice { + // Fully qualified CDI device name (e.g., vendor.com/gpu=gpudevice1) + // https://github.com/cncf-tags/container-device-interface/blob/main/SPEC.md + string name = 1; + // Optional defines if CDI device is required. + bool optional = 2; +} + // Mount specifies how to mount an input Op as a filesystem. message Mount { int64 input = 1; diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops_vtproto.pb.go b/vendor/github.com/moby/buildkit/solver/pb/ops_vtproto.pb.go index 9d71812423..1a6c89c369 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/ops_vtproto.pb.go +++ b/vendor/github.com/moby/buildkit/solver/pb/ops_vtproto.pb.go @@ -166,6 +166,13 @@ func (m *ExecOp) CloneVT() *ExecOp { } r.Secretenv = tmpContainer } + if rhs := m.CdiDevices; rhs != nil { + tmpContainer := make([]*CDIDevice, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.CdiDevices = tmpContainer + } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -284,6 +291,24 @@ func (m *SecretEnv) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *CDIDevice) CloneVT() *CDIDevice { + if m == nil { + return (*CDIDevice)(nil) + } + r := new(CDIDevice) + r.Name = m.Name + r.Optional = m.Optional + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CDIDevice) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Mount) CloneVT() *Mount { if m == nil { return (*Mount)(nil) @@ -1429,6 +1454,23 @@ func (this *ExecOp) EqualVT(that *ExecOp) bool { } } } + if len(this.CdiDevices) != len(that.CdiDevices) { + return false + } + for i, vx := range this.CdiDevices { + vy := that.CdiDevices[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &CDIDevice{} + } + if q == nil { + q = &CDIDevice{} + } + if !p.EqualVT(q) { + return false + } + } + } return string(this.unknownFields) == string(that.unknownFields) } @@ -1606,6 +1648,28 @@ func (this *SecretEnv) EqualMessageVT(thatMsg proto.Message) bool { } return this.EqualVT(that) } +func (this *CDIDevice) EqualVT(that *CDIDevice) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Name != that.Name { + return false + } + if this.Optional != that.Optional { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *CDIDevice) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*CDIDevice) + if !ok { + return false + } + return this.EqualVT(that) +} func (this *Mount) EqualVT(that *Mount) bool { if this == that { return true @@ -3220,6 +3284,18 @@ func (m *ExecOp) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.CdiDevices) > 0 { + for iNdEx := len(m.CdiDevices) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CdiDevices[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(size)) + i-- + dAtA[i] = 0x32 + } + } if len(m.Secretenv) > 0 { for iNdEx := len(m.Secretenv) - 1; iNdEx >= 0; iNdEx-- { size, err := m.Secretenv[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) @@ -3565,6 +3641,56 @@ func (m *SecretEnv) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *CDIDevice) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CDIDevice) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CDIDevice) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Optional { + i-- + if m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *Mount) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -6023,6 +6149,12 @@ func (m *ExecOp) SizeVT() (n int) { n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } } + if len(m.CdiDevices) > 0 { + for _, e := range m.CdiDevices { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } n += len(m.unknownFields) return n } @@ -6150,6 +6282,23 @@ func (m *SecretEnv) SizeVT() (n int) { return n } +func (m *CDIDevice) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Optional { + n += 2 + } + n += len(m.unknownFields) + return n +} + func (m *Mount) SizeVT() (n int) { if m == nil { return 0 @@ -7936,6 +8085,40 @@ func (m *ExecOp) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CdiDevices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CdiDevices = append(m.CdiDevices, &CDIDevice{}) + if err := m.CdiDevices[len(m.CdiDevices)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -8772,6 +8955,109 @@ func (m *SecretEnv) UnmarshalVT(dAtA []byte) error { } return nil } +func (m *CDIDevice) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: CDIDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CDIDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Optional = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Mount) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/moby/buildkit/solver/progress.go b/vendor/github.com/moby/buildkit/solver/progress.go index 92e2c6cb00..dfc01b801e 100644 --- a/vendor/github.com/moby/buildkit/solver/progress.go +++ b/vendor/github.com/moby/buildkit/solver/progress.go @@ -60,7 +60,9 @@ func (j *Job) Status(ctx context.Context, ch chan *client.SolveStatus) error { bklog.G(ctx).Warnf("progress %s log without vertex info", p.ID) continue } - v.Vertex = vtx.(digest.Digest) + if v.Vertex == "" { + v.Vertex = vtx.(digest.Digest) + } v.Timestamp = p.Timestamp ss.Logs = append(ss.Logs, &v) case client.VertexWarning: @@ -69,7 +71,9 @@ func (j *Job) Status(ctx context.Context, ch chan *client.SolveStatus) error { bklog.G(ctx).Warnf("progress %s warning without vertex info", p.ID) continue } - v.Vertex = vtx.(digest.Digest) + if v.Vertex == "" { + v.Vertex = vtx.(digest.Digest) + } ss.Warnings = append(ss.Warnings, &v) } } diff --git a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go index cb1aa06d18..a351a7944e 100644 --- a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go +++ b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go @@ -17,6 +17,7 @@ const ( var ( UserCNIConfigPath = filepath.Join(UserConfigDir(), "cni.json") + CDISpecDirs = []string{"/etc/cdi", "/var/run/cdi", "/etc/buildkit/cdi"} ) // UserAddress typically returns /run/user/$UID/buildkit/buildkitd.sock diff --git a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go index 323c8a98ab..c2103887a3 100644 --- 
a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go +++ b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go @@ -18,6 +18,7 @@ var ( var ( UserCNIConfigPath = DefaultCNIConfigPath + CDISpecDirs []string ) func UserAddress() string { diff --git a/vendor/github.com/moby/buildkit/util/contentutil/refs.go b/vendor/github.com/moby/buildkit/util/contentutil/refs.go index 48abcdad55..affbfabc7d 100644 --- a/vendor/github.com/moby/buildkit/util/contentutil/refs.go +++ b/vendor/github.com/moby/buildkit/util/contentutil/refs.go @@ -16,12 +16,41 @@ import ( "github.com/pkg/errors" ) -func ProviderFromRef(ref string) (ocispecs.Descriptor, content.Provider, error) { +type ResolveOpt struct { + Credentials func(string) (string, string, error) +} + +type ResolveOptFunc func(*ResolveOpt) + +func WithCredentials(c func(string) (string, string, error)) ResolveOptFunc { + return func(o *ResolveOpt) { + o.Credentials = func(host string) (string, string, error) { + if host == "registry-1.docker.io" { + host = "https://index.docker.io/v1/" + } + return c(host) + } + } +} + +func ProviderFromRef(ref string, opts ...ResolveOptFunc) (ocispecs.Descriptor, content.Provider, error) { headers := http.Header{} headers.Set("User-Agent", version.UserAgent()) - remote := docker.NewResolver(docker.ResolverOptions{ + + var ro ResolveOpt + for _, f := range opts { + f(&ro) + } + + dro := docker.ResolverOptions{ Headers: headers, - }) + } + if ro.Credentials != nil { + dro.Hosts = docker.ConfigureDefaultRegistries( + docker.WithAuthorizer(docker.NewDockerAuthorizer(docker.WithAuthCreds(ro.Credentials))), + ) + } + remote := docker.NewResolver(dro) name, desc, err := remote.Resolve(context.TODO(), ref) if err != nil { diff --git a/vendor/github.com/moby/buildkit/worker/base/worker.go b/vendor/github.com/moby/buildkit/worker/base/worker.go index ffe78f6ec6..c1569cd98a 100644 --- a/vendor/github.com/moby/buildkit/worker/base/worker.go +++ b/vendor/github.com/moby/buildkit/worker/base/worker.go @@ -33,6 +33,7 @@ import ( containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" "github.com/moby/buildkit/snapshot/imagerefchecker" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver/cdidevices" "github.com/moby/buildkit/solver/llbsolver/mounts" "github.com/moby/buildkit/solver/llbsolver/ops" "github.com/moby/buildkit/solver/pb" @@ -82,6 +83,7 @@ type WorkerOpt struct { MetadataStore *metadata.Store MountPoolRoot string ResourceMonitor *resources.Monitor + CDIManager *cdidevices.Manager } // Worker is a local worker instance with dedicated snapshotter, cache, and so on. 
@@ -245,6 +247,10 @@ func (w *Worker) LeaseManager() *leaseutil.Manager { return w.WorkerOpt.LeaseManager } +func (w *Worker) CDIManager() *cdidevices.Manager { + return w.WorkerOpt.CDIManager +} + func (w *Worker) ID() string { return w.WorkerOpt.ID } diff --git a/vendor/github.com/moby/buildkit/worker/containerd/containerd.go b/vendor/github.com/moby/buildkit/worker/containerd/containerd.go index 31f5e01b03..c6a5b1e058 100644 --- a/vendor/github.com/moby/buildkit/worker/containerd/containerd.go +++ b/vendor/github.com/moby/buildkit/worker/containerd/containerd.go @@ -16,6 +16,7 @@ import ( "github.com/moby/buildkit/executor/containerdexecutor" "github.com/moby/buildkit/executor/oci" containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" + "github.com/moby/buildkit/solver/llbsolver/cdidevices" "github.com/moby/buildkit/util/leaseutil" "github.com/moby/buildkit/util/network/netproviders" "github.com/moby/buildkit/util/winlayers" @@ -43,6 +44,7 @@ type WorkerOptions struct { ParallelismSem *semaphore.Weighted TraceSocket string Runtime *RuntimeInfo + CDIManager *cdidevices.Manager } // NewWorkerOpt creates a WorkerOpt. @@ -162,6 +164,7 @@ func newContainerd(client *ctd.Client, workerOpts WorkerOptions) (base.WorkerOpt TraceSocket: workerOpts.TraceSocket, Rootless: workerOpts.Rootless, Runtime: workerOpts.Runtime, + CDIManager: workerOpts.CDIManager, NetworkProviders: np, } @@ -182,6 +185,7 @@ func newContainerd(client *ctd.Client, workerOpts WorkerOptions) (base.WorkerOpt GarbageCollect: gc, ParallelismSem: workerOpts.ParallelismSem, MountPoolRoot: filepath.Join(root, "cachemounts"), + CDIManager: workerOpts.CDIManager, } return opt, nil } diff --git a/vendor/github.com/moby/buildkit/worker/worker.go b/vendor/github.com/moby/buildkit/worker/worker.go index ca5292e9ec..0b5b4cd3b4 100644 --- a/vendor/github.com/moby/buildkit/worker/worker.go +++ b/vendor/github.com/moby/buildkit/worker/worker.go @@ -13,6 +13,7 @@ import ( "github.com/moby/buildkit/session" containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver/cdidevices" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/leaseutil" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" @@ -41,6 +42,7 @@ type Worker interface { CacheManager() cache.Manager LeaseManager() *leaseutil.Manager GarbageCollect(context.Context) error + CDIManager() *cdidevices.Manager } type Infos interface { diff --git a/vendor/github.com/tonistiigi/go-actions-cache/Dockerfile b/vendor/github.com/tonistiigi/go-actions-cache/Dockerfile index ad3160a699..2b7680e542 100644 --- a/vendor/github.com/tonistiigi/go-actions-cache/Dockerfile +++ b/vendor/github.com/tonistiigi/go-actions-cache/Dockerfile @@ -1,7 +1,7 @@ # syntax=docker/dockerfile:1 ARG GO_VERSION=1.23 -ARG XX_VERSION=1.5.0 +ARG XX_VERSION=1.6.1 FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx @@ -14,6 +14,9 @@ FROM base AS test ARG TESTFLAGS ARG GITHUB_REPOSITORY ARG ACTIONS_CACHE_URL +ARG ACTIONS_CACHE_API_FORCE_VERSION +ARG ACTIONS_CACHE_SERVICE_V2 +ARG ACTIONS_RESULTS_URL RUN --mount=target=. 
\ --mount=target=/go/pkg/mod,type=cache \ --mount=target=/root/.cache,type=cache \ diff --git a/vendor/github.com/tonistiigi/go-actions-cache/cache.go b/vendor/github.com/tonistiigi/go-actions-cache/cache.go index 516e719431..a3173e3d41 100644 --- a/vendor/github.com/tonistiigi/go-actions-cache/cache.go +++ b/vendor/github.com/tonistiigi/go-actions-cache/cache.go @@ -56,27 +56,54 @@ func NewBlob(dt []byte) Blob { } func TryEnv(opt Opt) (*Cache, error) { + var v2 bool + if v, ok := os.LookupEnv("ACTIONS_CACHE_SERVICE_V2"); ok { + if b, err := strconv.ParseBool(v); err == nil { + v2 = b + } + } + + if v, ok := os.LookupEnv("ACTIONS_CACHE_API_FORCE_VERSION"); ok && v != "" { + switch v { + case "v1": + v2 = false + case "v2": + v2 = true + default: + return nil, errors.Errorf("invalid ACTIONS_CACHE_API_FORCE_VERSION %q", v) + } + } + + var token string + var cacheURL string + if v2 { + cacheURL, _ = os.LookupEnv("ACTIONS_RESULTS_URL") + } else { + // ACTIONS_CACHE_URL=https://artifactcache.actions.githubusercontent.com/xxx/ + cacheURL, _ = os.LookupEnv("ACTIONS_CACHE_URL") + } + tokenEnc, ok := os.LookupEnv("GHCACHE_TOKEN_ENC") if ok { - url, token, err := decryptToken(tokenEnc, os.Getenv("GHCACHE_TOKEN_PW")) + url, tkn, err := decryptToken(tokenEnc, os.Getenv("GHCACHE_TOKEN_PW")) if err != nil { return nil, err } - return New(token, url, opt) + if cacheURL == "" { + cacheURL = url + } + token = tkn } - token, ok := os.LookupEnv("ACTIONS_RUNTIME_TOKEN") - if !ok { + if token == "" { + token, _ = os.LookupEnv("ACTIONS_RUNTIME_TOKEN") + } + + if token == "" { return nil, nil } - // ACTIONS_CACHE_URL=https://artifactcache.actions.githubusercontent.com/xxx/ - cacheURL, ok := os.LookupEnv("ACTIONS_CACHE_URL") - if !ok { - return nil, nil - } - - return New(token, cacheURL, opt) + return New(token, cacheURL, v2, opt) } type Opt struct { @@ -85,7 +112,7 @@ type Opt struct { BackoffPool *BackoffPool } -func New(token, url string, opt Opt) (*Cache, error) { +func New(token, url string, v2 bool, opt Opt) (*Cache, error) { tk, _, err := new(jwt.Parser).ParseUnverified(token, jwt.MapClaims{}) if err != nil { return nil, errors.WithStack(err) @@ -146,6 +173,7 @@ func New(token, url string, opt Opt) (*Cache, error) { Token: tk, IssuedAt: nbft, ExpiresAt: expt, + IsV2: v2, }, nil } @@ -195,6 +223,7 @@ type Cache struct { Token *jwt.Token IssuedAt time.Time ExpiresAt time.Time + IsV2 bool } func (c *Cache) Scopes() []Scope { @@ -202,6 +231,13 @@ func (c *Cache) Scopes() []Scope { } func (c *Cache) Load(ctx context.Context, keys ...string) (*Entry, error) { + if c.IsV2 { + return c.loadV2(ctx, keys...) + } + return c.loadV1(ctx, keys...) 
+} + +func (c *Cache) loadV1(ctx context.Context, keys ...string) (*Entry, error) { u, err := url.Parse(c.url("cache")) if err != nil { return nil, err @@ -235,7 +271,23 @@ func (c *Cache) Load(ctx context.Context, keys ...string) (*Entry, error) { return &ce, nil } -func (c *Cache) reserve(ctx context.Context, key string) (int, error) { +func (c *Cache) reserve(ctx context.Context, key string) (string, string, error) { + if c.IsV2 { + url, err := c.reserveV2(ctx, key) + if err != nil { + return "", "", err + } + return key, url, nil + } + cid, err := c.reserveV1(ctx, key) + if err != nil { + return "", "", err + } + sid := strconv.Itoa(cid) + return sid, sid, nil +} + +func (c *Cache) reserveV1(ctx context.Context, key string) (int, error) { dt, err := json.Marshal(ReserveCacheReq{Key: key, Version: version(key)}) if err != nil { return 0, errors.WithStack(err) @@ -266,19 +318,26 @@ func (c *Cache) reserve(ctx context.Context, key string) (int, error) { return cr.CacheID, nil } -func (c *Cache) commit(ctx context.Context, id int, size int64) error { +func (c *Cache) commit(ctx context.Context, id string, size int64) error { + if c.IsV2 { + return c.commitV2(ctx, id, size) + } + return c.commitV1(ctx, id, size) +} + +func (c *Cache) commitV1(ctx context.Context, id string, size int64) error { dt, err := json.Marshal(CommitCacheReq{Size: size}) if err != nil { return errors.WithStack(err) } - req := c.newRequest("POST", c.url(fmt.Sprintf("caches/%d", id)), func() io.Reader { + req := c.newRequest("POST", c.url(fmt.Sprintf("caches/%s", id)), func() io.Reader { return bytes.NewReader(dt) }) req.headers["Content-Type"] = "application/json" Log("commit cache %s, size %d", req.url, size) resp, err := c.doWithRetries(ctx, req) if err != nil { - return errors.Wrapf(err, "error committing cache %d", id) + return errors.Wrapf(err, "error committing cache %s", id) } dt, err = io.ReadAll(io.LimitReader(resp.Body, 32*1024)) if err != nil { @@ -290,7 +349,14 @@ func (c *Cache) commit(ctx context.Context, id int, size int64) error { return resp.Body.Close() } -func (c *Cache) upload(ctx context.Context, id int, b Blob) error { +func (c *Cache) upload(ctx context.Context, url string, b Blob) error { + if c.IsV2 { + return c.uploadV2(ctx, url, b) + } + return c.uploadV1(ctx, url, b) +} + +func (c *Cache) uploadV1(ctx context.Context, id string, b Blob) error { var mu sync.Mutex eg, ctx := errgroup.WithContext(ctx) offset := int64(0) @@ -320,12 +386,12 @@ func (c *Cache) upload(ctx context.Context, id int, b Blob) error { } func (c *Cache) Save(ctx context.Context, key string, b Blob) error { - id, err := c.reserve(ctx, key) + id, url, err := c.reserve(ctx, key) if err != nil { return err } - if err := c.upload(ctx, id, b); err != nil { + if err := c.upload(ctx, url, b); err != nil { return err } @@ -370,10 +436,11 @@ loop0: return errors.Wrapf(err, "failed to parse %s index", key) } } - var cacheID int + var cacheID string + var url string for { idx++ - cacheID, err = c.reserve(ctx, fmt.Sprintf("%s#%d", key, idx)) + cacheID, url, err = c.reserve(ctx, fmt.Sprintf("%s#%d", key, idx)) if err != nil { if errors.Is(err, os.ErrExist) { if blocked <= forceTimeout { @@ -391,15 +458,15 @@ loop0: } break } - if err := c.upload(ctx, cacheID, b); err != nil { + if err := c.upload(ctx, url, b); err != nil { return nil } return c.commit(ctx, cacheID, b.Size()) } } -func (c *Cache) uploadChunk(ctx context.Context, id int, ra io.ReaderAt, off, n int64) error { - req := c.newRequest("PATCH", c.url(fmt.Sprintf("caches/%d", 
id)), func() io.Reader { +func (c *Cache) uploadChunk(ctx context.Context, id string, ra io.ReaderAt, off, n int64) error { + req := c.newRequest("PATCH", c.url(fmt.Sprintf("caches/%s", id)), func() io.Reader { return io.NewSectionReader(ra, off, n) }) req.headers["Content-Type"] = "application/octet-stream" @@ -433,12 +500,12 @@ func (c *Cache) newRequest(method, url string, body func() io.Reader) *request { } func (c *Cache) doWithRetries(ctx context.Context, r *request) (*http.Response, error) { - var err error + var lastErr error max := time.Now().Add(c.opt.Timeout) for { if err1 := c.opt.BackoffPool.Wait(ctx, time.Until(max)); err1 != nil { - if err != nil { - return nil, errors.Wrapf(err, "%v", err1) + if lastErr != nil { + return nil, errors.Wrapf(lastErr, "%v", err1) } return nil, err1 } @@ -458,6 +525,7 @@ func (c *Cache) doWithRetries(ctx context.Context, r *request) (*http.Response, if errors.As(err, &he) { if he.StatusCode == http.StatusTooManyRequests { c.opt.BackoffPool.Delay() + lastErr = err continue } } @@ -512,9 +580,10 @@ type CommitCacheReq struct { } type Entry struct { - Key string `json:"cacheKey"` - Scope string `json:"scope"` - URL string `json:"archiveLocation"` + Key string `json:"cacheKey"` + Scope string `json:"scope"` + URL string `json:"archiveLocation"` + IsAzureBlob bool `json:"isAzureBlob"` client *http.Client } @@ -529,6 +598,9 @@ func (ce *Entry) WriteTo(ctx context.Context, w io.Writer) error { // Download returns a ReaderAtCloser for pulling the data. Concurrent reads are not allowed func (ce *Entry) Download(ctx context.Context) ReaderAtCloser { + if ce.IsAzureBlob { + return ce.downloadV2(ctx) + } return toReaderAtCloser(func(offset int64) (io.ReadCloser, error) { req, err := http.NewRequest("GET", ce.URL, nil) if err != nil { @@ -644,7 +716,25 @@ func checkResponse(resp *http.Response) error { } else if gae.Message != "" { err = errors.WithStack(gae) } else { - err = errors.Errorf("unknown error %s: %s", resp.Status, dt) + var errorV2 struct { + Code string `json:"code"` + Msg string `json:"message"` + } + if err1 := json.Unmarshal(dt, &errorV2); err1 == nil && errorV2.Code != "" { + gae.Message = errorV2.Msg + if gae.Message == "" { + gae.Message = errorV2.Code + } else { + gae.Message = resp.Status + } + if errorV2.Code == "already_exists" { + errorV2.Code = "ArtifactCacheItemAlreadyExistsException" + } + gae.TypeKey = errorV2.Code + err = errors.WithStack(gae) + } else { + err = errors.Errorf("unknown error %s: %s", resp.Status, dt) + } } return HTTPError{ diff --git a/vendor/github.com/tonistiigi/go-actions-cache/cache_v2.go b/vendor/github.com/tonistiigi/go-actions-cache/cache_v2.go new file mode 100644 index 0000000000..22ea996e4a --- /dev/null +++ b/vendor/github.com/tonistiigi/go-actions-cache/cache_v2.go @@ -0,0 +1,194 @@ +package actionscache + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/pkg/errors" +) + +func (c *Cache) reserveV2(ctx context.Context, key string) (string, error) { + dt, err := json.Marshal(ReserveCacheReq{Key: key, Version: version(key)}) + if err != nil { + return "", errors.WithStack(err) + } + req := c.newRequestV2(c.urlV2("CreateCacheEntry"), func() io.Reader { + return bytes.NewReader(dt) + }) + Log("save cache req %s body=%s", req.url, dt) + resp, err := c.doWithRetries(ctx, req) + if err != nil { + return "", errors.WithStack(err) + } + defer resp.Body.Close() 
+ + dt, err = io.ReadAll(io.LimitReader(resp.Body, 32*1024)) + if err != nil { + return "", errors.WithStack(err) + } + Log("save cache resp %s body=%s", req.url, dt) + var cr struct { + OK bool `json:"ok"` + SignedUploadURL string `json:"signed_upload_url"` + } + if err := json.Unmarshal(dt, &cr); err != nil { + return "", errors.WithStack(err) + } + + if !cr.OK { + return "", errors.New("failed to reserve cache") + } + return cr.SignedUploadURL, nil +} + +func (c *Cache) uploadV2(ctx context.Context, url string, b Blob) error { + client, err := blockblob.NewClientWithNoCredential(url, nil) + if err != nil { + return errors.WithStack(err) + } + + // uploading with sized blocks requires io.File ¯\_(ツ)_/¯ + resp, err := client.UploadStream(ctx, &rc{ReaderAt: b}, nil) + if err != nil { + return errors.WithStack(err) + } + Log("upload cache %s %s", url, *resp.RequestID) + return nil +} + +func (ce *Entry) downloadV2(ctx context.Context) ReaderAtCloser { + return toReaderAtCloser(func(offset int64) (io.ReadCloser, error) { + client, err := blockblob.NewClientWithNoCredential(ce.URL, nil) + if err != nil { + return nil, errors.WithStack(err) + } + resp, err := client.DownloadStream(ctx, &blob.DownloadStreamOptions{ + Range: blob.HTTPRange{Offset: offset}, + }) + if err != nil { + return nil, errors.WithStack(err) + } + return resp.Body, nil + }) +} + +func (c *Cache) commitV2(ctx context.Context, key string, size int64) error { + var payload = struct { + Key string `json:"key"` + SizeBytes int64 `json:"size_bytes"` + Version string `json:"version"` + }{ + Key: key, + SizeBytes: size, + Version: version(key), + } + + dt, err := json.Marshal(payload) + if err != nil { + return errors.WithStack(err) + } + + req := c.newRequestV2(c.urlV2("FinalizeCacheEntryUpload"), func() io.Reader { + return bytes.NewReader(dt) + }) + + Log("commit cache req %s body=%s", req.url, dt) + + resp, err := c.doWithRetries(ctx, req) + if err != nil { + return errors.WithStack(err) + } + defer resp.Body.Close() + + dt, err = io.ReadAll(io.LimitReader(resp.Body, 32*1024)) + if err != nil { + return errors.WithStack(err) + } + + var cr struct { + OK bool `json:"ok"` + EntryID string `json:"entry_id"` + } + if err := json.Unmarshal(dt, &cr); err != nil { + return errors.WithStack(err) + } + if !cr.OK { + return errors.New("failed to commit cache") + } + + Log("commit cache resp %s %s", req.url, cr.EntryID) + return nil +} + +func (c *Cache) loadV2(ctx context.Context, keys ...string) (*Entry, error) { + var payload = struct { + Key string `json:"key"` + RestoreKeys []string `json:"restore_keys"` + Version string `json:"version"` + }{ + Key: keys[0], + RestoreKeys: keys, + Version: version(keys[0]), + } + dt, err := json.Marshal(payload) + if err != nil { + return nil, errors.WithStack(err) + } + + req := c.newRequestV2(c.urlV2("GetCacheEntryDownloadURL"), func() io.Reader { + return bytes.NewReader(dt) + }) + + resp, err := c.doWithRetries(ctx, req) + if err != nil { + return nil, errors.WithStack(err) + } + defer resp.Body.Close() + + dt, err = io.ReadAll(io.LimitReader(resp.Body, 32*1024)) + if err != nil { + return nil, errors.WithStack(err) + } + + var val struct { + OK bool `json:"ok"` + SignedDownloadURL string `json:"signed_download_url"` + MatchedKey string `json:"matched_key"` + } + + if err := json.Unmarshal(dt, &val); err != nil { + return nil, errors.WithStack(err) + } + + if !val.OK { + return nil, nil + } + + var ce Entry + ce.Key = val.MatchedKey + ce.URL = val.SignedDownloadURL + ce.IsAzureBlob = true + 
ce.client = c.opt.Client + + return &ce, nil +} + +func (c *Cache) newRequestV2(url string, body func() io.Reader) *request { + return &request{ + method: "POST", + url: url, + body: body, + headers: map[string]string{ + "Authorization": "Bearer " + c.Token.Raw, + "Content-Type": "application/json", + }, + } +} + +func (c *Cache) urlV2(p string) string { + return c.URL + "twirp/github.actions.results.api.v1.CacheService/" + p +} diff --git a/vendor/github.com/tonistiigi/go-actions-cache/docker-bake.hcl b/vendor/github.com/tonistiigi/go-actions-cache/docker-bake.hcl index c2e184d8c8..45f2758bdc 100644 --- a/vendor/github.com/tonistiigi/go-actions-cache/docker-bake.hcl +++ b/vendor/github.com/tonistiigi/go-actions-cache/docker-bake.hcl @@ -14,6 +14,19 @@ variable "ACTIONS_CACHE_URL" { default = null } +variable "ACTIONS_CACHE_SERVICE_V2" { + default = null +} + +variable "ACTIONS_RESULTS_URL" { + default = null +} + +variable "ACTIONS_CACHE_API_FORCE_VERSION" { + default = null +} + + group "default" { targets = ["test"] } @@ -25,6 +38,9 @@ target "test" { GO_VERSION = GO_VERSION GITHUB_REPOSITORY = GITHUB_REPOSITORY ACTIONS_CACHE_URL = ACTIONS_CACHE_URL + ACTIONS_CACHE_API_FORCE_VERSION = ACTIONS_CACHE_API_FORCE_VERSION + ACTIONS_CACHE_SERVICE_V2 = ACTIONS_CACHE_SERVICE_V2 + ACTIONS_RESULTS_URL = ACTIONS_RESULTS_URL } secret = [ "id=GITHUB_TOKEN,env=GITHUB_TOKEN", diff --git a/vendor/modules.txt b/vendor/modules.txt index 04c13faf99..c7d992476b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -28,6 +28,44 @@ github.com/AdaLogics/go-fuzz-headers # github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2 ## explicit; go 1.18 github.com/AdamKorcz/go-118-fuzz-build/testing +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/azcore +github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared +github.com/Azure/azure-sdk-for-go/sdk/azcore/log +github.com/Azure/azure-sdk-for-go/sdk/azcore/policy +github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime +github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming +github.com/Azure/azure-sdk-for-go/sdk/azcore/to +github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing +# github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/internal/diag +github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo +github.com/Azure/azure-sdk-for-go/sdk/internal/exported +github.com/Azure/azure-sdk-for-go/sdk/internal/log +github.com/Azure/azure-sdk-for-go/sdk/internal/poller +github.com/Azure/azure-sdk-for-go/sdk/internal/temporal +github.com/Azure/azure-sdk-for-go/sdk/internal/uuid +# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob 
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas # github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c ## explicit; go 1.16 github.com/Azure/go-ansiterm @@ -717,7 +755,7 @@ github.com/mitchellh/hashstructure/v2 # github.com/mitchellh/reflectwalk v1.0.2 ## explicit github.com/mitchellh/reflectwalk -# github.com/moby/buildkit v0.19.0 +# github.com/moby/buildkit v0.20.0-rc1.0.20250212151618-2f7007eec399 ## explicit; go 1.22.0 github.com/moby/buildkit/api/services/control github.com/moby/buildkit/api/types @@ -797,6 +835,7 @@ github.com/moby/buildkit/solver/bboltcachestorage github.com/moby/buildkit/solver/errdefs github.com/moby/buildkit/solver/internal/pipe github.com/moby/buildkit/solver/llbsolver +github.com/moby/buildkit/solver/llbsolver/cdidevices github.com/moby/buildkit/solver/llbsolver/errdefs github.com/moby/buildkit/solver/llbsolver/file github.com/moby/buildkit/solver/llbsolver/mounts @@ -1136,7 +1175,7 @@ github.com/tonistiigi/dchapes-mode github.com/tonistiigi/fsutil github.com/tonistiigi/fsutil/copy github.com/tonistiigi/fsutil/types -# github.com/tonistiigi/go-actions-cache v0.0.0-20241210095730-017636a73805 +# github.com/tonistiigi/go-actions-cache v0.0.0-20250211194249-bd99cf5bbc65 ## explicit; go 1.22 github.com/tonistiigi/go-actions-cache # github.com/tonistiigi/go-archvariant v1.0.0
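
Note (illustration only, not part of the patch above): the vendored hunks in this section add a pb.CDIDevice message, an ExecOp.cdiDevices field with generated vtproto marshaling, default CDI spec directories in appdefaults, and a CDIManager() accessor on workers. As a minimal sketch of the new wire types — assuming only that github.com/moby/buildkit/solver/pb is importable at the vendored revision — the following builds an ExecOp that requests an optional CDI device (the device name is the example given in the ops.proto comment) and round-trips it through the generated vtproto helpers:

    // Sketch: exercises the pb.CDIDevice / ExecOp.CdiDevices types added in this change.
    package main

    import (
    	"fmt"

    	"github.com/moby/buildkit/solver/pb"
    )

    func main() {
    	op := &pb.ExecOp{
    		CdiDevices: []*pb.CDIDevice{{
    			Name:     "vendor.com/gpu=gpudevice1", // fully qualified CDI device name (example from ops.proto)
    			Optional: true,                        // "optional" field added as field 2 in ops.proto
    		}},
    	}

    	// Marshal and unmarshal with the vtproto methods generated for the new field.
    	dt, err := op.MarshalVT()
    	if err != nil {
    		panic(err)
    	}
    	var decoded pb.ExecOp
    	if err := decoded.UnmarshalVT(dt); err != nil {
    		panic(err)
    	}
    	fmt.Println(decoded.CdiDevices[0].Name, decoded.CdiDevices[0].Optional)
    }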