Mirror of https://github.com/moby/moby.git (synced 2026-01-11 18:51:37 +00:00)
Merge pull request #49274 from thaJeztah/containerd_2_deps
vendor: containerd 2.0 / buildkit v0.19.0 dependencies
vendor.mod (23 changed lines)
@@ -12,11 +12,11 @@ require (
	cloud.google.com/go/logging v1.9.0
	code.cloudfoundry.org/clock v1.1.0
	dario.cat/mergo v1.0.1
	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24
	github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6
	github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c
	github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0
	github.com/Microsoft/go-winio v0.6.2
	github.com/Microsoft/hcsshim v0.12.8
	github.com/Microsoft/hcsshim v0.12.9
	github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91
	github.com/aws/aws-sdk-go-v2 v1.30.3
	github.com/aws/aws-sdk-go-v2/config v1.27.27
@@ -29,7 +29,7 @@ require (
	github.com/containerd/containerd v1.7.25
	github.com/containerd/containerd/api v1.8.0
	github.com/containerd/continuity v0.4.5
	github.com/containerd/errdefs v0.3.0
	github.com/containerd/errdefs v1.0.0
	github.com/containerd/fifo v1.1.0
	github.com/containerd/log v0.1.0
	github.com/containerd/platforms v0.2.1
@@ -93,7 +93,7 @@ require (
	github.com/spf13/cobra v1.8.1
	github.com/spf13/pflag v1.0.5
	github.com/tonistiigi/go-archvariant v1.0.0
	github.com/vbatts/tar-split v0.11.5
	github.com/vbatts/tar-split v0.11.6
	github.com/vishvananda/netlink v1.3.1-0.20240922070040-084abd93d350
	github.com/vishvananda/netns v0.0.5
	go.etcd.io/bbolt v1.3.11
@@ -120,7 +120,7 @@ require (
require (
	cloud.google.com/go v0.112.0 // indirect
	cloud.google.com/go/longrunning v0.5.4 // indirect
	github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect
	github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2 // indirect
	github.com/agext/levenshtein v1.2.3 // indirect
	github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect
	github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 // indirect
@@ -142,12 +142,13 @@ require (
	github.com/cilium/ebpf v0.16.0 // indirect
	github.com/container-storage-interface/spec v1.5.0 // indirect
	github.com/containerd/console v1.0.4 // indirect
	github.com/containerd/go-cni v1.1.10 // indirect
	github.com/containerd/errdefs/pkg v0.3.0 // indirect
	github.com/containerd/go-cni v1.1.11 // indirect
	github.com/containerd/go-runc v1.1.0 // indirect
	github.com/containerd/nydus-snapshotter v0.14.0 // indirect
	github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
	github.com/containerd/ttrpc v1.2.5 // indirect
	github.com/containernetworking/cni v1.2.2 // indirect
	github.com/containerd/ttrpc v1.2.7 // indirect
	github.com/containernetworking/cni v1.2.3 // indirect
	github.com/containernetworking/plugins v1.5.1 // indirect
	github.com/cyphar/filepath-securejoin v0.3.5 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
@@ -155,7 +156,7 @@ require (
	github.com/dustin/go-humanize v1.0.0 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee // indirect
	github.com/fsnotify/fsnotify v1.6.0 // indirect
	github.com/fsnotify/fsnotify v1.7.0 // indirect
	github.com/go-logr/logr v1.4.2 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/gofrs/flock v0.12.1 // indirect
@@ -193,7 +194,7 @@ require (
	github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
	github.com/shibumi/go-pathspec v1.3.0 // indirect
	github.com/spdx/tools-golang v0.5.3 // indirect
	github.com/stretchr/testify v1.9.0 // indirect
	github.com/stretchr/testify v1.10.0 // indirect
	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
	github.com/tinylib/msgp v1.1.8 // indirect
	github.com/tonistiigi/dchapes-mode v0.0.0-20241001053921-ca0759fec205 // indirect
@@ -229,7 +230,7 @@ require (
	google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect; TODO(thaJeztah): should we keep this one aligned with the other google.golang.org/genproto/xxx modules?
	google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	k8s.io/klog/v2 v2.90.1 // indirect
	k8s.io/klog/v2 v2.130.1 // indirect
	sigs.k8s.io/yaml v1.4.0 // indirect
	tags.cncf.io/container-device-interface/specs-go v0.8.0 // indirect
)
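Note: the jump from github.com/containerd/errdefs v0.3.0 to v1.0.0, together with the new errdefs/pkg module, splits the gRPC helpers out of the core errdefs package. A minimal sketch of how callers typically classify errors against the bumped module; the container name is purely illustrative and the snippet is not taken from this diff:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/containerd/errdefs"
)

func main() {
	// Wrap and classify an error with the errdefs sentinels; this pattern is
	// unchanged by the v0.3.0 -> v1.0.0 bump pinned above.
	err := fmt.Errorf("lookup %q: %w", "mycontainer", errdefs.ErrNotFound)
	fmt.Println(errdefs.IsNotFound(err))             // true
	fmt.Println(errors.Is(err, errdefs.ErrNotFound)) // true
}
```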
vendor.sum (48 changed lines)
@@ -13,10 +13,10 @@ code.cloudfoundry.org/clock v1.1.0 h1:XLzC6W3Ah/Y7ht1rmZ6+QfPdt1iGWEAAtIZXgiaj57
|
||||
code.cloudfoundry.org/clock v1.1.0/go.mod h1:yA3fxddT9RINQL2XHS7PS+OXxKCGhfrZmlNUCIM6AKo=
|
||||
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
|
||||
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
|
||||
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 h1:59MxjQVfjXsBpLy+dbd2/ELV5ofnUkUZBvWSC85sheA=
|
||||
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
|
||||
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2 h1:dIScnXFlF784X79oi7MzVT6GWqr/W1uUt0pB5CsDs9M=
|
||||
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2/go.mod h1:gCLVsLfv1egrcZu+GoJATN5ts75F2s62ih/457eWzOw=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
@@ -25,8 +25,8 @@ github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0 h1:cOjLyhBhe91glg
|
||||
github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0/go.mod h1:fBaQWrftOD5CrVCUfoYGHs4X4VViTuGOXA8WloCjTY0=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/Microsoft/hcsshim v0.12.8 h1:BtDWYlFMcWhorrvSSo2M7z0csPdw6t7no/C3FsSvqiI=
|
||||
github.com/Microsoft/hcsshim v0.12.8/go.mod h1:cibQ4BqhJ32FXDwPdQhKhwrwophnh3FuT4nwQZF907w=
|
||||
github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg=
|
||||
github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y=
|
||||
github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91 h1:vX+gnvBc56EbWYrmlhYbFYRaeikAke1GL84N4BEYOFE=
|
||||
github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91/go.mod h1:cDLGBht23g0XQdLjzn6xOGXDkLK182YfINAaZEQLCHQ=
|
||||
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
|
||||
@@ -117,12 +117,14 @@ github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVM
|
||||
github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc=
|
||||
github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4=
|
||||
github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
|
||||
github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
|
||||
github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
|
||||
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
|
||||
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
|
||||
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
|
||||
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
|
||||
github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY=
|
||||
github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
|
||||
github.com/containerd/go-cni v1.1.10 h1:c2U73nld7spSWfiJwSh/8W9DK+/qQwYM2rngIhCyhyg=
|
||||
github.com/containerd/go-cni v1.1.10/go.mod h1:/Y/sL8yqYQn1ZG1om1OncJB1W4zN3YmjfP/ShCzG/OY=
|
||||
github.com/containerd/go-cni v1.1.11 h1:fWt1K15AmSLsEfa57N+qYw4NeGPiQKYq1pjNGJwV9mc=
|
||||
github.com/containerd/go-cni v1.1.11/go.mod h1:/Y/sL8yqYQn1ZG1om1OncJB1W4zN3YmjfP/ShCzG/OY=
|
||||
github.com/containerd/go-runc v1.1.0 h1:OX4f+/i2y5sUT7LhmcJH7GYrjjhHa1QI4e8yO0gGleA=
|
||||
github.com/containerd/go-runc v1.1.0/go.mod h1:xJv2hFF7GvHtTJd9JqTS2UVxMkULUYw4JN5XAUZqH5U=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
@@ -133,12 +135,12 @@ github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpS
|
||||
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
|
||||
github.com/containerd/ttrpc v1.2.5 h1:IFckT1EFQoFBMG4c3sMdT8EP3/aKfumK1msY+Ze4oLU=
|
||||
github.com/containerd/ttrpc v1.2.5/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
|
||||
github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRqQ=
|
||||
github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
|
||||
github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40=
|
||||
github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
|
||||
github.com/containernetworking/cni v1.2.2 h1:9IbP6KJQQxVKo4hhnm8r50YcVKrJbJu3Dqw+Rbt1vYk=
|
||||
github.com/containernetworking/cni v1.2.2/go.mod h1:DuLgF+aPd3DzcTQTtp/Nvl1Kim23oFKdm2okJzBQA5M=
|
||||
github.com/containernetworking/cni v1.2.3 h1:hhOcjNVUQTnzdRJ6alC5XF+wd9mfGIUaj8FuJbEslXM=
|
||||
github.com/containernetworking/cni v1.2.3/go.mod h1:DuLgF+aPd3DzcTQTtp/Nvl1Kim23oFKdm2okJzBQA5M=
|
||||
github.com/containernetworking/plugins v1.5.1 h1:T5ji+LPYjjgW0QM+KyrigZbLsZ8jaX+E5J/EcKOE4gQ=
|
||||
github.com/containernetworking/plugins v1.5.1/go.mod h1:MIQfgMayGuHYs0XdNudf31cLLAC+i242hNm6KuDGqCM=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
@@ -184,13 +186,12 @@ github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee h1:v6Eju/FhxsACGN
|
||||
github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee/go.mod h1:2H9hjfbpSMHwY503FclkV/lZTBh2YlOmLLSda12uL8c=
|
||||
github.com/fluent/fluent-logger-golang v1.9.0 h1:zUdY44CHX2oIUc7VTNZc+4m+ORuO/mldQDA7czhWXEg=
|
||||
github.com/fluent/fluent-logger-golang v1.9.0/go.mod h1:2/HCT/jTy78yGyeNGQLGQsjF3zzzAuy6Xlk6FCMV5eU=
|
||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
@@ -514,8 +515,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/tedsuo/ifrit v0.0.0-20230330192023-5cba443a66c4 h1:MGZzzxBuPuK4J0XQo+0uy0NnXQGKzHXhYp5oG1Wy860=
|
||||
@@ -538,8 +539,8 @@ github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab h1:H6aJ0yKQ0gF49Q
|
||||
github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab/go.mod h1:ulncasL3N9uLrVann0m+CDlJKWsIAP34MPcOJF6VRvc=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||
github.com/urfave/cli v1.19.1/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
|
||||
github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
|
||||
github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs=
|
||||
github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI=
|
||||
github.com/vishvananda/netlink v1.3.1-0.20240922070040-084abd93d350 h1:w5OI+kArIBVksl8UGn6ARQshtPCQvDsbuA9NQie3GIg=
|
||||
github.com/vishvananda/netlink v1.3.1-0.20240922070040-084abd93d350/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
|
||||
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
|
||||
@@ -701,7 +702,6 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -798,8 +798,8 @@ gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
|
||||
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
|
||||
k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
kernel.org/pub/linux/libs/security/libcap/cap v1.2.70 h1:QnLPkuDWWbD5C+3DUA2IUXai5TK6w2zff+MAGccqdsw=
|
||||
kernel.org/pub/linux/libs/security/libcap/cap v1.2.70/go.mod h1:/iBwcj9nbLejQitYvUm9caurITQ6WyNHibJk6Q9fiS4=
|
||||
kernel.org/pub/linux/libs/security/libcap/psx v1.2.70 h1:HsB2G/rEQiYyo1bGoQqHZ/Bvd6x1rERQTNdPr1FyWjI=
|
||||
|
||||
vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go (generated, vendored; 48 changed lines)
@@ -48,6 +48,7 @@ type ConsumeFuzzer struct {
	NumberOfCalls int
	position uint32
	fuzzUnexportedFields bool
	forceUTF8Strings bool
	curDepth int
	Funcs map[reflect.Type]reflect.Value
}
@@ -104,6 +105,14 @@ func (f *ConsumeFuzzer) DisallowUnexportedFields() {
	f.fuzzUnexportedFields = false
}

func (f *ConsumeFuzzer) AllowNonUTF8Strings() {
	f.forceUTF8Strings = false
}

func (f *ConsumeFuzzer) DisallowNonUTF8Strings() {
	f.forceUTF8Strings = true
}

func (f *ConsumeFuzzer) GenerateStruct(targetStruct interface{}) error {
	e := reflect.ValueOf(targetStruct).Elem()
	return f.fuzzStruct(e, false)
@@ -224,6 +233,14 @@ func (f *ConsumeFuzzer) fuzzStruct(e reflect.Value, customFunctions bool) error
		if e.CanSet() {
			e.Set(uu)
		}
	case reflect.Uint:
		newInt, err := f.GetUint()
		if err != nil {
			return err
		}
		if e.CanSet() {
			e.SetUint(uint64(newInt))
		}
	case reflect.Uint16:
		newInt, err := f.GetUint16()
		if err != nil {
@@ -309,6 +326,14 @@ func (f *ConsumeFuzzer) fuzzStruct(e reflect.Value, customFunctions bool) error
		if e.CanSet() {
			e.SetUint(uint64(b))
		}
	case reflect.Bool:
		b, err := f.GetBool()
		if err != nil {
			return err
		}
		if e.CanSet() {
			e.SetBool(b)
		}
	}
	return nil
}
@@ -410,6 +435,23 @@ func (f *ConsumeFuzzer) GetUint64() (uint64, error) {
	return binary.BigEndian.Uint64(u64), nil
}

func (f *ConsumeFuzzer) GetUint() (uint, error) {
	var zero uint
	size := int(unsafe.Sizeof(zero))
	if size == 8 {
		u64, err := f.GetUint64()
		if err != nil {
			return 0, err
		}
		return uint(u64), nil
	}
	u32, err := f.GetUint32()
	if err != nil {
		return 0, err
	}
	return uint(u32), nil
}

func (f *ConsumeFuzzer) GetBytes() ([]byte, error) {
	var length uint32
	var err error
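Note: the new GetUint helper above picks a 64- or 32-bit read based on the in-memory size of uint. A small, self-contained sketch of the same platform check (nothing here is specific to this diff):

```go
package main

import (
	"fmt"
	"math/bits"
	"unsafe"
)

func main() {
	var zero uint
	// Both report the native word size: 8 bytes / 64 bits on 64-bit targets,
	// 4 bytes / 32 bits on 32-bit targets, which is what GetUint branches on.
	fmt.Println(unsafe.Sizeof(zero), bits.UintSize)
}
```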
@@ -461,7 +503,11 @@ func (f *ConsumeFuzzer) GetString() (string, error) {
		return "nil", errors.New("numbers overflow")
	}
	f.position = byteBegin + length
	return string(f.data[byteBegin:f.position]), nil
	s := string(f.data[byteBegin:f.position])
	if f.forceUTF8Strings {
		s = strings.ToValidUTF8(s, "")
	}
	return s, nil
}

func (f *ConsumeFuzzer) GetBool() (bool, error) {
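Note: when forceUTF8Strings is set, GetString now sanitizes its output with strings.ToValidUTF8 and an empty replacement, so invalid byte sequences are dropped rather than preserved. A minimal illustration of that standard-library behaviour:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	raw := "a\xffb" // contains an invalid UTF-8 byte
	fmt.Printf("%q\n", strings.ToValidUTF8(raw, "")) // "ab"
}
```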
vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/f.go (generated, vendored; 88 changed lines)
@@ -41,147 +41,119 @@ func (f *F) Fuzz(ff any) {
	args := []reflect.Value{reflect.ValueOf(f.T)}
	fuzzConsumer := fuzz.NewConsumer(f.Data)
	for _, v := range types {
		//fmt.Printf("arg %v\n", v)
		newElem := reflect.New(v).Elem()
		switch v.String() {
		case "[]uint8":
			b, err := fuzzConsumer.GetBytes()
			if err != nil {
				return
			}
			newBytes := reflect.New(v)
			newBytes.Elem().SetBytes(b)
			args = append(args, newBytes.Elem())
			newElem.SetBytes(b)
		case "string":
			s, err := fuzzConsumer.GetString()
			if err != nil {
				return
			}
			newString := reflect.New(v)
			newString.Elem().SetString(s)
			args = append(args, newString.Elem())
			newElem.SetString(s)
		case "int":
			randInt, err := fuzzConsumer.GetInt()
			randInt, err := fuzzConsumer.GetUint64()
			if err != nil {
				return
			}
			newInt := reflect.New(v)
			newInt.Elem().SetInt(int64(randInt))
			args = append(args, newInt.Elem())
			newElem.SetInt(int64(int(randInt)))
		case "int8":
			randInt, err := fuzzConsumer.GetInt()
			randInt, err := fuzzConsumer.GetByte()
			if err != nil {
				return
			}
			newInt := reflect.New(v)
			newInt.Elem().SetInt(int64(randInt))
			args = append(args, newInt.Elem())
			newElem.SetInt(int64(randInt))
		case "int16":
			randInt, err := fuzzConsumer.GetInt()
			randInt, err := fuzzConsumer.GetUint16()
			if err != nil {
				return
			}
			newInt := reflect.New(v)
			newInt.Elem().SetInt(int64(randInt))
			args = append(args, newInt.Elem())
			newElem.SetInt(int64(randInt))
		case "int32":
			randInt, err := fuzzConsumer.GetInt()
			randInt, err := fuzzConsumer.GetUint32()
			if err != nil {
				return
			}
			newInt := reflect.New(v)
			newInt.Elem().SetInt(int64(randInt))
			args = append(args, newInt.Elem())
			newElem.SetInt(int64(int32(randInt)))
		case "int64":
			randInt, err := fuzzConsumer.GetInt()
			randInt, err := fuzzConsumer.GetUint64()
			if err != nil {
				return
			}
			newInt := reflect.New(v)
			newInt.Elem().SetInt(int64(randInt))
			args = append(args, newInt.Elem())
			newElem.SetInt(int64(randInt))
		case "uint":
			randInt, err := fuzzConsumer.GetInt()
			randInt, err := fuzzConsumer.GetUint64()
			if err != nil {
				return
			}
			newUint := reflect.New(v)
			newUint.Elem().SetUint(uint64(randInt))
			args = append(args, newUint.Elem())
			newElem.SetUint(uint64(uint(randInt)))
		case "uint8":
			randInt, err := fuzzConsumer.GetInt()
			randInt, err := fuzzConsumer.GetByte()
			if err != nil {
				return
			}
			newUint := reflect.New(v)
			newUint.Elem().SetUint(uint64(randInt))
			args = append(args, newUint.Elem())
			newElem.SetUint(uint64(randInt))
		case "uint16":
			randInt, err := fuzzConsumer.GetUint16()
			if err != nil {
				return
			}
			newUint16 := reflect.New(v)
			newUint16.Elem().SetUint(uint64(randInt))
			args = append(args, newUint16.Elem())
			newElem.SetUint(uint64(randInt))
		case "uint32":
			randInt, err := fuzzConsumer.GetUint32()
			if err != nil {
				return
			}
			newUint32 := reflect.New(v)
			newUint32.Elem().SetUint(uint64(randInt))
			args = append(args, newUint32.Elem())
			newElem.SetUint(uint64(randInt))
		case "uint64":
			randInt, err := fuzzConsumer.GetUint64()
			if err != nil {
				return
			}
			newUint64 := reflect.New(v)
			newUint64.Elem().SetUint(uint64(randInt))
			args = append(args, newUint64.Elem())
			newElem.SetUint(uint64(randInt))
		case "rune":
			randRune, err := fuzzConsumer.GetRune()
			if err != nil {
				return
			}
			newRune := reflect.New(v)
			newRune.Elem().Set(reflect.ValueOf(randRune))
			args = append(args, newRune.Elem())
			newElem.Set(reflect.ValueOf(randRune))
		case "float32":
			randFloat, err := fuzzConsumer.GetFloat32()
			if err != nil {
				return
			}
			newFloat := reflect.New(v)
			newFloat.Elem().Set(reflect.ValueOf(randFloat))
			args = append(args, newFloat.Elem())
			newElem.Set(reflect.ValueOf(randFloat))
		case "float64":
			randFloat, err := fuzzConsumer.GetFloat64()
			if err != nil {
				return
			}
			newFloat := reflect.New(v)
			newFloat.Elem().Set(reflect.ValueOf(randFloat))
			args = append(args, newFloat.Elem())
			newElem.Set(reflect.ValueOf(randFloat))
		case "bool":
			randBool, err := fuzzConsumer.GetBool()
			if err != nil {
				return
			}
			newBool := reflect.New(v)
			newBool.Elem().Set(reflect.ValueOf(randBool))
			args = append(args, newBool.Elem())
			newElem.Set(reflect.ValueOf(randBool))
		default:
			fmt.Println(v.String())
			panic(fmt.Sprintf("unsupported type: %s", v.String()))
		}
		args = append(args, newElem)

	}
	fn.Call(args)
}
func (f *F) Helper() {}
func (c *F) Log(args ...any) {
	fmt.Println(args...)
	fmt.Print(args...)
}
func (c *F) Logf(format string, args ...any) {
	fmt.Println(format, args)
	fmt.Println(fmt.Sprintf(format, args...))
}
func (c *F) Name() string { return "libFuzzer" }
func (c *F) Setenv(key, value string) {}
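Note: the rewritten Fuzz loop above allocates one addressable newElem per parameter type and appends it once after the switch, instead of building a fresh reflect.New value inside every case. A small, self-contained sketch of that pattern (the int32 parameter is only an example, not code from this diff):

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	t := reflect.TypeOf(int32(0))
	newElem := reflect.New(t).Elem() // addressable zero value of the target type
	newElem.SetInt(42)               // SetInt covers all signed integer kinds
	fn := reflect.ValueOf(func(v int32) { fmt.Println(v) })
	fn.Call([]reflect.Value{newElem}) // prints 42
}
```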
vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go (generated, vendored; 11 changed lines)
@@ -188,7 +188,7 @@ func Open(ctx context.Context, options *Options) (_ *JobObject, err error) {
			return nil, winapi.RtlNtStatusToDosError(status)
		}
	} else {
		jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, 0, unicodeJobName.Buffer)
		jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, false, unicodeJobName.Buffer)
		if err != nil {
			return nil, err
		}
@@ -523,12 +523,9 @@ func (job *JobObject) ApplyFileBinding(root, target string, readOnly bool) error
func isJobSilo(h windows.Handle) bool {
	// None of the information from the structure that this info class expects will be used, this is just used as
	// the call will fail if the job hasn't been upgraded to a silo so we can use this to tell when we open a job
	// if it's a silo or not. Because none of the info matters simply define a dummy struct with the size that the call
	// expects which is 16 bytes.
	type isSiloObj struct {
		_ [16]byte
	}
	var siloInfo isSiloObj
	// if it's a silo or not. We still need to define the struct layout as expected by Win32, else the struct
	// alignment might be different and the call will fail.
	var siloInfo winapi.SILOOBJECT_BASIC_INFORMATION
	err := winapi.QueryInformationJobObject(
		h,
		winapi.JobObjectSiloBasicInformation,
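Note: the dummy 16-byte struct is replaced by winapi.SILOOBJECT_BASIC_INFORMATION (declared later in this diff), which has the same total size but the real field layout. A quick sketch that checks the size assumption; the local struct below merely mirrors the declaration and is not the vendored type itself:

```go
package main

import (
	"fmt"
	"unsafe"
)

type siloObjectBasicInformation struct {
	SiloID            uint32
	SiloParentID      uint32
	NumberOfProcesses uint32
	IsInServerSilo    bool
	Reserved          [3]uint8
}

func main() {
	// The JobObjectSiloBasicInformation info class expects a 16-byte buffer.
	fmt.Println(unsafe.Sizeof(siloObjectBasicInformation{})) // 16
}
```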
vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go (generated, vendored; 2 changed lines)
@@ -6,7 +6,7 @@ import (
	"net"
	"os"

	"github.com/containerd/containerd/errdefs"
	errdefs "github.com/containerd/errdefs/pkg/errgrpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)
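Note: the import swap keeps the old errdefs identifier by aliasing the new github.com/containerd/errdefs/pkg/errgrpc package, so existing call sites compile unchanged. A hedged sketch of the idea; toStatusCode is a hypothetical helper written for illustration, not code from this file:

```go
package oc

import (
	errdefs "github.com/containerd/errdefs/pkg/errgrpc"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// toStatusCode shows that errgrpc.ToGRPC is still reachable as errdefs.ToGRPC
// under the alias, so the rest of the file does not need to change.
func toStatusCode(err error) codes.Code {
	if s, ok := status.FromError(errdefs.ToGRPC(err)); ok {
		return s.Code()
	}
	return codes.Unknown
}
```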
vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go (generated, vendored; 19 changed lines)
@@ -28,7 +28,7 @@ const (
// https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights
const (
	JOB_OBJECT_QUERY = 0x0004
	JOB_OBJECT_ALL_ACCESS = 0x1F001F
	JOB_OBJECT_ALL_ACCESS = 0x1F003F
)

// IO limit flags
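Note: JOB_OBJECT_ALL_ACCESS is widened from 0x1F001F to 0x1F003F, which appears to track the current Windows SDK definition: all standard rights plus SYNCHRONIZE ORed with six job-specific access bits instead of five. A small sketch of that composition (the named constants are standard Win32 values copied here for illustration, not part of this diff):

```go
package main

import "fmt"

func main() {
	const (
		standardRightsRequired = 0x000F0000
		synchronize            = 0x00100000
	)
	oldMask := standardRightsRequired | synchronize | 0x1F
	newMask := standardRightsRequired | synchronize | 0x3F
	fmt.Printf("0x%X 0x%X\n", oldMask, newMask) // 0x1F001F 0x1F003F
}
```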
@@ -160,6 +160,21 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
	CompletionPort windows.Handle
}

// typedef struct _SILOOBJECT_BASIC_INFORMATION {
//	DWORD SiloId;
//	DWORD SiloParentId;
//	DWORD NumberOfProcesses;
//	BOOLEAN IsInServerSilo;
//	BYTE Reserved[3];
// } SILOOBJECT_BASIC_INFORMATION, *PSILOOBJECT_BASIC_INFORMATION;
type SILOOBJECT_BASIC_INFORMATION struct {
	SiloID uint32
	SiloParentID uint32
	NumberOfProcesses uint32
	IsInServerSilo bool
	Reserved [3]uint8
}

// BOOL IsProcessInJob(
//	HANDLE ProcessHandle,
//	HANDLE JobHandle,
@@ -184,7 +199,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
//	LPCWSTR lpName
// );
//
//sys OpenJobObject(desiredAccess uint32, inheritHandle int32, lpName *uint16) (handle windows.Handle, err error) = kernel32.OpenJobObjectW
//sys OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) = kernel32.OpenJobObjectW

// DWORD SetIoRateControlInformationJobObject(
//	HANDLE hJob,
vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go (generated, vendored; 8 changed lines)
@@ -470,8 +470,12 @@ func LocalFree(ptr uintptr) {
	return
}

func OpenJobObject(desiredAccess uint32, inheritHandle int32, lpName *uint16) (handle windows.Handle, err error) {
	r0, _, e1 := syscall.SyscallN(procOpenJobObjectW.Addr(), uintptr(desiredAccess), uintptr(inheritHandle), uintptr(unsafe.Pointer(lpName)))
func OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) {
	var _p0 uint32
	if inheritHandle {
		_p0 = 1
	}
	r0, _, e1 := syscall.SyscallN(procOpenJobObjectW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(lpName)))
	handle = windows.Handle(r0)
	if handle == 0 {
		err = errnoErr(e1)
vendor/github.com/containerd/errdefs/pkg/LICENSE (new file, generated, vendored; 191 lines)
@@ -0,0 +1,191 @@
[Standard Apache License, Version 2.0 text (https://www.apache.org/licenses/LICENSE-2.0), followed by the "Copyright The containerd Authors" notice and the usual Apache-2.0 license grant paragraph.]
vendor/github.com/containerd/errdefs/pkg/errgrpc/grpc.go (new file, generated, vendored; 353 lines)
@@ -0,0 +1,353 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

// Package errgrpc provides utility functions for translating errors to
// and from a gRPC context.
//
// The functions ToGRPC and ToNative can be used to map server-side and
// client-side errors to the correct types.
package errgrpc

import (
	"context"
	"errors"
	"fmt"
	"reflect"
	"strconv"
	"strings"

	spb "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/protoadapt"
	"google.golang.org/protobuf/types/known/anypb"

	"github.com/containerd/typeurl/v2"

	"github.com/containerd/errdefs"
	"github.com/containerd/errdefs/pkg/internal/cause"
	"github.com/containerd/errdefs/pkg/internal/types"
)

// ToGRPC will attempt to map the error into a grpc error, from the error types
// defined in the the errdefs package and attempign to preserve the original
// description. Any type which does not resolve to a defined error type will
// be assigned the unknown error code.
//
// Further information may be extracted from certain errors depending on their
// type. The grpc error details will be used to attempt to preserve as much of
// the error structures and types as possible.
//
// Errors which can be marshaled using protobuf or typeurl will be considered
// for including as GRPC error details.
// Additionally, use the following interfaces in errors to preserve custom types:
//
//	WrapError(error) error - Used to wrap the previous error
//	JoinErrors(...error) error - Used to join all previous errors
//	CollapseError() - Used for errors which carry information but
//	                  should not have their error message shown.
func ToGRPC(err error) error {
	if err == nil {
		return nil
	}

	if _, ok := status.FromError(err); ok {
		// error has already been mapped to grpc
		return err
	}
	st := statusFromError(err)
	if st != nil {
		if details := errorDetails(err, false); len(details) > 0 {
			if ds, _ := st.WithDetails(details...); ds != nil {
				st = ds
			}
		}
		err = st.Err()
	}
	return err
}

func statusFromError(err error) *status.Status {
	switch errdefs.Resolve(err) {
	case errdefs.ErrInvalidArgument:
		return status.New(codes.InvalidArgument, err.Error())
	case errdefs.ErrNotFound:
		return status.New(codes.NotFound, err.Error())
	case errdefs.ErrAlreadyExists:
		return status.New(codes.AlreadyExists, err.Error())
	case errdefs.ErrPermissionDenied:
		return status.New(codes.PermissionDenied, err.Error())
	case errdefs.ErrResourceExhausted:
		return status.New(codes.ResourceExhausted, err.Error())
	case errdefs.ErrFailedPrecondition, errdefs.ErrConflict, errdefs.ErrNotModified:
		return status.New(codes.FailedPrecondition, err.Error())
	case errdefs.ErrAborted:
		return status.New(codes.Aborted, err.Error())
	case errdefs.ErrOutOfRange:
		return status.New(codes.OutOfRange, err.Error())
	case errdefs.ErrNotImplemented:
		return status.New(codes.Unimplemented, err.Error())
	case errdefs.ErrInternal:
		return status.New(codes.Internal, err.Error())
	case errdefs.ErrUnavailable:
		return status.New(codes.Unavailable, err.Error())
	case errdefs.ErrDataLoss:
		return status.New(codes.DataLoss, err.Error())
	case errdefs.ErrUnauthenticated:
		return status.New(codes.Unauthenticated, err.Error())
	case context.DeadlineExceeded:
		return status.New(codes.DeadlineExceeded, err.Error())
	case context.Canceled:
		return status.New(codes.Canceled, err.Error())
	case errdefs.ErrUnknown:
		return status.New(codes.Unknown, err.Error())
	}
	return nil
}

// errorDetails returns an array of errors which make up the provided error.
// If firstIncluded is true, then all encodable errors will be used, otherwise
// the first error in an error list will be not be used, to account for the
// the base status error which details are added to via wrap or join.
//
// The errors are ordered in way that they can be applied in order by either
// wrapping or joining the errors to recreate an error with the same structure
// when `WrapError` and `JoinErrors` interfaces are used.
//
// The intent is that when re-applying the errors to create a single error, the
// results of calls to `Error()`, `errors.Is`, `errors.As`, and "%+v" formatting
// is the same as the original error.
func errorDetails(err error, firstIncluded bool) []protoadapt.MessageV1 {
	switch uerr := err.(type) {
	case interface{ Unwrap() error }:
		details := errorDetails(uerr.Unwrap(), firstIncluded)

		// If the type is able to wrap, then include if proto
		if _, ok := err.(interface{ WrapError(error) error }); ok {
			// Get proto message
			if protoErr := toProtoMessage(err); protoErr != nil {
				details = append(details, protoErr)
			}
		}

		return details
	case interface{ Unwrap() []error }:
		var details []protoadapt.MessageV1
		for i, e := range uerr.Unwrap() {
			details = append(details, errorDetails(e, firstIncluded || i > 0)...)
		}

		if _, ok := err.(interface{ JoinErrors(...error) error }); ok {
			// Get proto message
			if protoErr := toProtoMessage(err); protoErr != nil {
				details = append(details, protoErr)
			}
		}
		return details
	}

	if firstIncluded {
		if protoErr := toProtoMessage(err); protoErr != nil {
			return []protoadapt.MessageV1{protoErr}
		}
		if gs, ok := status.FromError(ToGRPC(err)); ok {
			return []protoadapt.MessageV1{gs.Proto()}
		}
		// TODO: Else include unknown extra error type?
	}

	return nil
}

func toProtoMessage(err error) protoadapt.MessageV1 {
	// Do not double encode proto messages, otherwise use Any
	if pm, ok := err.(protoadapt.MessageV1); ok {
		return pm
	}
	if pm, ok := err.(proto.Message); ok {
		return protoadapt.MessageV1Of(pm)
	}

	if reflect.TypeOf(err).Kind() == reflect.Ptr {
		a, aerr := typeurl.MarshalAny(err)
		if aerr == nil {
			return &anypb.Any{
				TypeUrl: a.GetTypeUrl(),
				Value: a.GetValue(),
			}
		}
	}
	return nil
}

// ToGRPCf maps the error to grpc error codes, assembling the formatting string
// and combining it with the target error string.
//
// This is equivalent to grpc.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
func ToGRPCf(err error, format string, args ...interface{}) error {
	return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
}

// ToNative returns the underlying error from a grpc service based on the grpc
// error code. The grpc details are used to add wrap the error in more context
// or support multiple errors.
func ToNative(err error) error {
	if err == nil {
		return nil
	}

	s, isGRPC := status.FromError(err)

	var (
		desc string
		code codes.Code
	)

	if isGRPC {
		desc = s.Message()
		code = s.Code()
	} else {
		desc = err.Error()
		code = codes.Unknown
	}

	var cls error // divide these into error classes, becomes the cause

	switch code {
	case codes.InvalidArgument:
		cls = errdefs.ErrInvalidArgument
	case codes.AlreadyExists:
		cls = errdefs.ErrAlreadyExists
	case codes.NotFound:
		cls = errdefs.ErrNotFound
	case codes.Unavailable:
		cls = errdefs.ErrUnavailable
	case codes.FailedPrecondition:
		// TODO: Has suffix is not sufficient for conflict and not modified
		// Message should start with ": " or be at beginning of a line
		// Message should end with ": " or be at the end of a line
		// Compile a regex
		if desc == errdefs.ErrConflict.Error() || strings.HasSuffix(desc, ": "+errdefs.ErrConflict.Error()) {
			cls = errdefs.ErrConflict
		} else if desc == errdefs.ErrNotModified.Error() || strings.HasSuffix(desc, ": "+errdefs.ErrNotModified.Error()) {
			cls = errdefs.ErrNotModified
		} else {
			cls = errdefs.ErrFailedPrecondition
		}
	case codes.Unimplemented:
		cls = errdefs.ErrNotImplemented
	case codes.Canceled:
		cls = context.Canceled
	case codes.DeadlineExceeded:
		cls = context.DeadlineExceeded
	case codes.Aborted:
		cls = errdefs.ErrAborted
	case codes.Unauthenticated:
		cls = errdefs.ErrUnauthenticated
	case codes.PermissionDenied:
		cls = errdefs.ErrPermissionDenied
	case codes.Internal:
		cls = errdefs.ErrInternal
	case codes.DataLoss:
		cls = errdefs.ErrDataLoss
	case codes.OutOfRange:
		cls = errdefs.ErrOutOfRange
	case codes.ResourceExhausted:
		cls = errdefs.ErrResourceExhausted
	default:
		if idx := strings.LastIndex(desc, cause.UnexpectedStatusPrefix); idx > 0 {
			if status, uerr := strconv.Atoi(desc[idx+len(cause.UnexpectedStatusPrefix):]); uerr == nil && status >= 200 && status < 600 {
				cls = cause.ErrUnexpectedStatus{Status: status}
			}
		}
		if cls == nil {
			cls = errdefs.ErrUnknown
		}
	}

	msg := rebaseMessage(cls, desc)
	if msg == "" {
		err = cls
	} else if msg != desc {
		err = fmt.Errorf("%s: %w", msg, cls)
	} else if wm, ok := cls.(interface{ WithMessage(string) error }); ok {
		err = wm.WithMessage(msg)
	} else {
		err = fmt.Errorf("%s: %w", msg, cls)
	}

	if isGRPC {
		errs := []error{err}
		for _, a := range s.Details() {
			var derr error

			// First decode error if needed
			if s, ok := a.(*spb.Status); ok {
				derr = ToNative(status.ErrorProto(s))
			} else if e, ok := a.(error); ok {
				derr = e
			} else if dany, ok := a.(typeurl.Any); ok {
				i, uerr := typeurl.UnmarshalAny(dany)
				if uerr == nil {
					if e, ok = i.(error); ok {
						derr = e
					} else {
						derr = fmt.Errorf("non-error unmarshalled detail: %v", i)
					}
				} else {
					derr = fmt.Errorf("error of type %q with failure to unmarshal: %v", dany.GetTypeUrl(), uerr)
				}
			} else {
				derr = fmt.Errorf("non-error detail: %v", a)
			}

			switch werr := derr.(type) {
			case interface{ WrapError(error) error }:
				errs[len(errs)-1] = werr.WrapError(errs[len(errs)-1])
			case interface{ JoinErrors(...error) error }:
				// TODO: Consider whether this should support joining a subset
				errs[0] = werr.JoinErrors(errs...)
			case interface{ CollapseError() }:
				errs[len(errs)-1] = types.CollapsedError(errs[len(errs)-1], derr)
			default:
				errs = append(errs, derr)
			}

		}
		if len(errs) > 1 {
			err = errors.Join(errs...)
		} else {
			err = errs[0]
		}
	}

	return err
}

// rebaseMessage removes the repeats for an error at the end of an error
// string. This will happen when taking an error over grpc then remapping it.
//
// Effectively, we just remove the string of cls from the end of err if it
// appears there.
func rebaseMessage(cls error, desc string) string {
	clss := cls.Error()
	if desc == clss {
		return ""
	}

	return strings.TrimSuffix(desc, ": "+clss)
}
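Note: errgrpc is the new home for the gRPC error mapping that previously lived under containerd's errdefs. A minimal round-trip sketch using only functions shown in the file above; the container name is illustrative:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/containerd/errdefs"
	"github.com/containerd/errdefs/pkg/errgrpc"
)

func main() {
	orig := fmt.Errorf("container %q: %w", "web-1", errdefs.ErrNotFound)
	grpcErr := errgrpc.ToGRPC(orig)   // becomes a codes.NotFound status error
	back := errgrpc.ToNative(grpcErr) // mapped back to an errdefs class
	fmt.Println(errors.Is(back, errdefs.ErrNotFound)) // true
}
```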
vendor/github.com/containerd/errdefs/pkg/internal/cause/cause.go (new file, generated, vendored; 33 lines)
@@ -0,0 +1,33 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

// Package cause is used to define root causes for errors
// common to errors packages like grpc and http.
package cause

import "fmt"

type ErrUnexpectedStatus struct {
	Status int
}

const UnexpectedStatusPrefix = "unexpected status "

func (e ErrUnexpectedStatus) Error() string {
	return fmt.Sprintf("%s%d", UnexpectedStatusPrefix, e.Status)
}

func (ErrUnexpectedStatus) Unknown() {}
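Note: ErrUnexpectedStatus encodes an HTTP status into the error message, and the ToNative default case above recovers it by scanning for the same prefix. A tiny stand-alone sketch of that string round trip (the constant is copied here only for illustration):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

const unexpectedStatusPrefix = "unexpected status "

func main() {
	msg := "fetching manifest: " + unexpectedStatusPrefix + "503"
	if idx := strings.LastIndex(msg, unexpectedStatusPrefix); idx > 0 {
		status, err := strconv.Atoi(msg[idx+len(unexpectedStatusPrefix):])
		fmt.Println(status, err) // 503 <nil>
	}
}
```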
vendor/github.com/containerd/errdefs/pkg/internal/types/collapsible.go (new file, generated, vendored; 57 lines)
@@ -0,0 +1,57 @@
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package types

import "fmt"

// CollapsibleError indicates the error should be collapsed
type CollapsibleError interface {
	CollapseError()
}

// CollapsedError returns a new error with the collapsed
// error returned on unwrapped or when formatted with "%+v"
func CollapsedError(err error, collapsed ...error) error {
	return collapsedError{err, collapsed}
}

type collapsedError struct {
	error
	collapsed []error
}

func (c collapsedError) Unwrap() []error {
	return append([]error{c.error}, c.collapsed...)
}

func (c collapsedError) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			fmt.Fprintf(s, "%+v", c.error)
			for _, err := range c.collapsed {
				fmt.Fprintf(s, "\n%+v", err)
			}
			return
		}
		fallthrough
	case 's':
		fmt.Fprint(s, c.Error())
	case 'q':
		fmt.Fprintf(s, "%q", c.Error())
	}
}
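collapsedError relies on Go 1.20+ multi-error unwrapping: Error() and %s report only the primary error, while Unwrap() []error keeps the collapsed details reachable for errors.Is and errors.As. A sketch of that mechanism with a local stand-in type, since the real one lives in an internal package:

```go
package main

import (
	"errors"
	"fmt"
)

// collapsed mimics collapsedError above: the message comes from the embedded
// primary error, Unwrap exposes the collapsed details as well.
type collapsed struct {
	error
	extra []error
}

func (c collapsed) Unwrap() []error { return append([]error{c.error}, c.extra...) }

var errDetail = errors.New("not found") // hypothetical collapsed detail

func main() {
	err := collapsed{errors.New("pull failed"), []error{errDetail}}

	fmt.Println(err)                       // pull failed  (detail hidden from the message)
	fmt.Println(errors.Is(err, errDetail)) // true         (detail still matchable)
}
```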
2
vendor/github.com/containerd/go-cni/README.md
generated
vendored
@@ -13,7 +13,7 @@ A generic CNI library to provide APIs for CNI plugin interactions. The library p
- Query status of CNI network plugin initialization
- Check verifies the network is still in desired state

go-cni aims to support plugins that implement [Container Network Interface](https://github.com/containernetworking/cni)
go-cni aims to support plugins that implement the [Container Network Interface](https://github.com/containernetworking/cni).

## Usage
```go

39
vendor/github.com/containerd/go-cni/cni.go
generated
vendored
@@ -135,11 +135,20 @@ func (c *libcni) Load(opts ...Opt) error {
|
||||
|
||||
// Status returns the status of CNI initialization.
|
||||
func (c *libcni) Status() error {
|
||||
if err := c.ready(); err != nil {
|
||||
return err
|
||||
}
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
if len(c.networks) < c.networkCount {
|
||||
return ErrCNINotInitialized
|
||||
// STATUS is only called for CNI Version 1.1.0 or greater. It is ignored for previous versions.
|
||||
for _, v := range c.networks {
|
||||
err := c.cniConfig.GetStatusNetworkList(context.Background(), v.config)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -153,9 +162,11 @@ func (c *libcni) Networks() []*Network {
|
||||
|
||||
// Setup setups the network in the namespace and returns a Result
|
||||
func (c *libcni) Setup(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error) {
|
||||
if err := c.Status(); err != nil {
|
||||
if err := c.ready(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
ns, err := newNamespace(id, path, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -169,9 +180,11 @@ func (c *libcni) Setup(ctx context.Context, id string, path string, opts ...Name
|
||||
|
||||
// SetupSerially setups the network in the namespace and returns a Result
|
||||
func (c *libcni) SetupSerially(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error) {
|
||||
if err := c.Status(); err != nil {
|
||||
if err := c.ready(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
ns, err := newNamespace(id, path, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -232,9 +245,11 @@ func (c *libcni) attachNetworks(ctx context.Context, ns *Namespace) ([]*types100
|
||||
|
||||
// Remove removes the network config from the namespace
|
||||
func (c *libcni) Remove(ctx context.Context, id string, path string, opts ...NamespaceOpts) error {
|
||||
if err := c.Status(); err != nil {
|
||||
if err := c.ready(); err != nil {
|
||||
return err
|
||||
}
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
ns, err := newNamespace(id, path, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -260,9 +275,11 @@ func (c *libcni) Remove(ctx context.Context, id string, path string, opts ...Nam
|
||||
|
||||
// Check checks if the network is still in desired state
|
||||
func (c *libcni) Check(ctx context.Context, id string, path string, opts ...NamespaceOpts) error {
|
||||
if err := c.Status(); err != nil {
|
||||
if err := c.ready(); err != nil {
|
||||
return err
|
||||
}
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
ns, err := newNamespace(id, path, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -310,3 +327,13 @@ func (c *libcni) GetConfig() *ConfigResult {
|
||||
func (c *libcni) reset() {
|
||||
c.networks = nil
|
||||
}
|
||||
|
||||
func (c *libcni) ready() error {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
if len(c.networks) < c.networkCount {
|
||||
return ErrCNINotInitialized
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
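Taken together, Load still builds the network list, ready() now guards the fast path, and Status() additionally issues STATUS per network on CNI 1.1.0+. A rough usage sketch; the New constructor, the load options, and the container/netns names are assumptions, only Load, Status and Setup appear in this diff.

```go
package main

import (
	"context"
	"log"

	gocni "github.com/containerd/go-cni"
)

func main() {
	ctx := context.Background()

	// New and the load options are assumed from go-cni's public API.
	c, err := gocni.New()
	if err != nil {
		log.Fatal(err)
	}
	if err := c.Load(gocni.WithLoNetwork, gocni.WithDefaultConf); err != nil {
		log.Fatal(err)
	}

	// With the change above this also runs STATUS against each network
	// when the configs declare CNI 1.1.0 or newer.
	if err := c.Status(); err != nil {
		log.Fatalf("CNI not ready: %v", err)
	}

	// Hypothetical container ID and network namespace path.
	if _, err := c.Setup(ctx, "container-1", "/var/run/netns/container-1"); err != nil {
		log.Fatal(err)
	}
}
```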
61
vendor/github.com/containerd/go-cni/testutils.go
generated
vendored
@@ -75,3 +75,64 @@ func tearDownCNIConfig(t *testing.T, confDir string) {
|
||||
t.Fatalf("Failed to cleanup CNI configs: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func buildFakeConfig(t *testing.T) (string, string) {
|
||||
conf := `
|
||||
{
|
||||
"cniVersion": "1.1.0",
|
||||
"name": "containerd-net",
|
||||
"plugins": [
|
||||
{
|
||||
"type": "bridge",
|
||||
"bridge": "cni0",
|
||||
"isGateway": true,
|
||||
"ipMasq": true,
|
||||
"promiscMode": true,
|
||||
"ipam": {
|
||||
"type": "host-ipam",
|
||||
"ranges": [
|
||||
[{
|
||||
"subnet": "10.88.0.0/16"
|
||||
}],
|
||||
[{
|
||||
"subnet": "2001:4860:4860::/64"
|
||||
}]
|
||||
],
|
||||
"routes": [
|
||||
{ "dst": "0.0.0.0/0" },
|
||||
{ "dst": "::/0" }
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "portmap",
|
||||
"capabilities": {"portMappings": true}
|
||||
}
|
||||
]
|
||||
}`
|
||||
|
||||
cniDir, err := makeTmpDir("fakecni")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create plugin config dir: %v", err)
|
||||
}
|
||||
|
||||
cniConfDir := path.Join(cniDir, "net.d")
|
||||
err = os.MkdirAll(cniConfDir, 0777)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create network config dir: %v", err)
|
||||
}
|
||||
|
||||
networkConfig1 := path.Join(cniConfDir, "mocknetwork1.conflist")
|
||||
f1, err := os.Create(networkConfig1)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create network config %v: %v", f1, err)
|
||||
}
|
||||
|
||||
_, err = f1.WriteString(conf)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to write network config file %v: %v", f1, err)
|
||||
}
|
||||
f1.Close()
|
||||
|
||||
return cniDir, cniConfDir
|
||||
}
|
||||
|
||||
8
vendor/github.com/containerd/ttrpc/channel.go
generated
vendored
@@ -143,10 +143,10 @@ func (ch *channel) recv() (messageHeader, []byte, error) {
}

func (ch *channel) send(streamID uint32, t messageType, flags uint8, p []byte) error {
	// TODO: Error on send rather than on recv
	//if len(p) > messageLengthMax {
	//	return status.Errorf(codes.InvalidArgument, "refusing to send, message length %v exceed maximum message size of %v", len(p), messageLengthMax)
	//}
	if len(p) > messageLengthMax {
		return OversizedMessageError(len(p))
	}

	if err := writeMessageHeader(ch.bw, ch.hwbuf[:], messageHeader{Length: uint32(len(p)), StreamID: streamID, Type: t, Flags: flags}); err != nil {
		return err
	}
48
vendor/github.com/containerd/ttrpc/errors.go
generated
vendored
@@ -16,7 +16,12 @@
package ttrpc

import "errors"
import (
	"errors"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

var (
	// ErrProtocol is a general error in the handling the protocol.
@@ -32,3 +37,44 @@ var (

	// ErrStreamClosed is when the streaming connection is closed.
	ErrStreamClosed = errors.New("ttrpc: stream closed")
)

// OversizedMessageErr is used to indicate refusal to send an oversized message.
// It wraps a ResourceExhausted grpc Status together with the offending message
// length.
type OversizedMessageErr struct {
	messageLength int
	err           error
}

// OversizedMessageError returns an OversizedMessageErr error for the given message
// length if it exceeds the allowed maximum. Otherwise a nil error is returned.
func OversizedMessageError(messageLength int) error {
	if messageLength <= messageLengthMax {
		return nil
	}

	return &OversizedMessageErr{
		messageLength: messageLength,
		err:           status.Errorf(codes.ResourceExhausted, "message length %v exceed maximum message size of %v", messageLength, messageLengthMax),
	}
}

// Error returns the error message for the corresponding grpc Status for the error.
func (e *OversizedMessageErr) Error() string {
	return e.err.Error()
}

// Unwrap returns the corresponding error with our grpc status code.
func (e *OversizedMessageErr) Unwrap() error {
	return e.err
}

// RejectedLength retrieves the rejected message length which triggered the error.
func (e *OversizedMessageErr) RejectedLength() int {
	return e.messageLength
}

// MaximumLength retrieves the maximum allowed message length that triggered the error.
func (*OversizedMessageErr) MaximumLength() int {
	return messageLengthMax
}
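A sketch of how a caller might treat the new error once it surfaces from a client call; the service and method names are placeholders, and only OversizedMessageErr and its accessors come from the code above.

```go
package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/containerd/ttrpc"
)

// call wraps a ttrpc invocation and reports oversized payloads distinctly so
// the caller can shrink or chunk the request instead of blindly retrying.
func call(ctx context.Context, client *ttrpc.Client, req, resp interface{}) error {
	err := client.Call(ctx, "example.v1.Service", "DoWork", req, resp)

	var oversized *ttrpc.OversizedMessageErr
	if errors.As(err, &oversized) {
		return fmt.Errorf("request of %d bytes exceeds the %d byte ttrpc limit: %w",
			oversized.RejectedLength(), oversized.MaximumLength(), err)
	}
	return err
}
```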
28
vendor/github.com/containerd/ttrpc/metadata.go
generated
vendored
@@ -62,6 +62,34 @@ func (m MD) Append(key string, values ...string) {
	}
}

// Clone returns a copy of MD or nil if it's nil.
// It's copied from golang's `http.Header.Clone` implementation:
// https://cs.opensource.google/go/go/+/refs/tags/go1.23.4:src/net/http/header.go;l=94
func (m MD) Clone() MD {
	if m == nil {
		return nil
	}

	// Find total number of values.
	nv := 0
	for _, vv := range m {
		nv += len(vv)
	}
	sv := make([]string, nv) // shared backing array for headers' values
	m2 := make(MD, len(m))
	for k, vv := range m {
		if vv == nil {
			// Preserve nil values.
			m2[k] = nil
			continue
		}
		n := copy(sv, vv)
		m2[k] = sv[:n:n]
		sv = sv[n:]
	}
	return m2
}

func (m MD) setRequest(r *Request) {
	for k, values := range m {
		for _, v := range values {
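Clone matters when shared metadata is tweaked per request; since MD is a plain map of string slices, copying the values keeps the original untouched. A small sketch using only the Append and Clone methods shown here:

```go
package main

import (
	"fmt"

	"github.com/containerd/ttrpc"
)

func main() {
	base := ttrpc.MD{"trace-id": {"abc123"}} // hypothetical shared headers

	// Clone before per-call additions so base is not mutated.
	perCall := base.Clone()
	perCall.Append("retry-attempt", "2")

	fmt.Println(base["retry-attempt"])    // []   (unchanged)
	fmt.Println(perCall["retry-attempt"]) // [2]
}
```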
15
vendor/github.com/containerd/ttrpc/server.go
generated
vendored
@@ -74,9 +74,18 @@ func (s *Server) RegisterService(name string, desc *ServiceDesc) {
|
||||
}
|
||||
|
||||
func (s *Server) Serve(ctx context.Context, l net.Listener) error {
|
||||
s.addListener(l)
|
||||
s.mu.Lock()
|
||||
s.addListenerLocked(l)
|
||||
defer s.closeListener(l)
|
||||
|
||||
select {
|
||||
case <-s.done:
|
||||
s.mu.Unlock()
|
||||
return ErrServerClosed
|
||||
default:
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
var (
|
||||
backoff time.Duration
|
||||
handshaker = s.config.handshaker
|
||||
@@ -188,9 +197,7 @@ func (s *Server) Close() error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Server) addListener(l net.Listener) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
func (s *Server) addListenerLocked(l net.Listener) {
|
||||
s.listeners[l] = struct{}{}
|
||||
}
|
||||
|
||||
|
||||
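The effect of checking s.done while the lock is held is that a server closed before (or while) Serve starts returns ErrServerClosed instead of registering the listener. A sketch; NewServer and the socket path are assumptions, while Serve, Close and ErrServerClosed appear in this diff.

```go
package main

import (
	"context"
	"errors"
	"log"
	"net"

	"github.com/containerd/ttrpc"
)

func main() {
	s, err := ttrpc.NewServer() // constructor assumed, not part of this diff
	if err != nil {
		log.Fatal(err)
	}

	l, err := net.Listen("unix", "/tmp/example-ttrpc.sock") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	_ = s.Close() // close first to exercise the new done-channel check

	if err := s.Serve(context.Background(), l); errors.Is(err, ttrpc.ErrServerClosed) {
		log.Println("serve refused: server already closed")
	}
}
```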
2
vendor/github.com/containernetworking/cni/libcni/api.go
generated
vendored
@@ -817,6 +817,8 @@ func (c *CNIConfig) GCNetworkList(ctx context.Context, list *NetworkConfigList,
	}
	if args != nil {
		inject["cni.dev/valid-attachments"] = args.ValidAttachments
		// #1101: spec used incorrect variable name
		inject["cni.dev/attachments"] = args.ValidAttachments
	}

	for _, plugin := range list.Plugins {
13
vendor/github.com/fsnotify/fsnotify/.cirrus.yml
generated
vendored
Normal file
@@ -0,0 +1,13 @@
freebsd_task:
  name: 'FreeBSD'
  freebsd_instance:
    image_family: freebsd-13-2
  install_script:
    - pkg update -f
    - pkg install -y go
  test_script:
    # run tests as user "cirrus" instead of root
    - pw useradd cirrus -m
    - chown -R cirrus:cirrus .
    - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
    - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
1
vendor/github.com/fsnotify/fsnotify/.gitignore
generated
vendored
@@ -4,3 +4,4 @@

# Output of go build ./cmd/fsnotify
/fsnotify
/fsnotify.exe
87
vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
generated
vendored
@@ -1,16 +1,87 @@
|
||||
# Changelog
|
||||
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
Unreleased
|
||||
----------
|
||||
Nothing yet.
|
||||
|
||||
## [1.6.0] - 2022-10-13
|
||||
1.7.0 - 2023-10-22
|
||||
------------------
|
||||
This version of fsnotify needs Go 1.17.
|
||||
|
||||
### Additions
|
||||
|
||||
- illumos: add FEN backend to support illumos and Solaris. ([#371])
|
||||
|
||||
- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful
|
||||
in cases where you can't control the kernel buffer and receive a large number
|
||||
of events in bursts. ([#550], [#572])
|
||||
|
||||
- all: add `AddWith()`, which is identical to `Add()` but allows passing
|
||||
options. ([#521])
|
||||
|
||||
- windows: allow setting the ReadDirectoryChangesW() buffer size with
|
||||
`fsnotify.WithBufferSize()`; the default of 64K is the highest value that
|
||||
works on all platforms and is enough for most purposes, but in some cases a
|
||||
highest buffer is needed. ([#521])
|
||||
|
||||
### Changes and fixes
|
||||
|
||||
- inotify: remove watcher if a watched path is renamed ([#518])
|
||||
|
||||
After a rename the reported name wasn't updated, or even an empty string.
|
||||
Inotify doesn't provide any good facilities to update it, so just remove the
|
||||
watcher. This is already how it worked on kqueue and FEN.
|
||||
|
||||
On Windows this does work, and remains working.
|
||||
|
||||
- windows: don't listen for file attribute changes ([#520])
|
||||
|
||||
File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API,
|
||||
with no way to see if they're a file write or attribute change, so would show
|
||||
up as a fsnotify.Write event. This is never useful, and could result in many
|
||||
spurious Write events.
|
||||
|
||||
- windows: return `ErrEventOverflow` if the buffer is full ([#525])
|
||||
|
||||
Before it would merely return "short read", making it hard to detect this
|
||||
error.
|
||||
|
||||
- kqueue: make sure events for all files are delivered properly when removing a
|
||||
watched directory ([#526])
|
||||
|
||||
Previously they would get sent with `""` (empty string) or `"."` as the path
|
||||
name.
|
||||
|
||||
- kqueue: don't emit spurious Create events for symbolic links ([#524])
|
||||
|
||||
The link would get resolved but kqueue would "forget" it already saw the link
|
||||
itself, resulting on a Create for every Write event for the directory.
|
||||
|
||||
- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516])
|
||||
|
||||
- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in
|
||||
`backend_other.go`, making it easier to use on unsupported platforms such as
|
||||
WASM, AIX, etc. ([#528])
|
||||
|
||||
- other: use the `backend_other.go` no-op if the `appengine` build tag is set;
|
||||
Google AppEngine forbids usage of the unsafe package so the inotify backend
|
||||
won't compile there.
|
||||
|
||||
[#371]: https://github.com/fsnotify/fsnotify/pull/371
|
||||
[#516]: https://github.com/fsnotify/fsnotify/pull/516
|
||||
[#518]: https://github.com/fsnotify/fsnotify/pull/518
|
||||
[#520]: https://github.com/fsnotify/fsnotify/pull/520
|
||||
[#521]: https://github.com/fsnotify/fsnotify/pull/521
|
||||
[#524]: https://github.com/fsnotify/fsnotify/pull/524
|
||||
[#525]: https://github.com/fsnotify/fsnotify/pull/525
|
||||
[#526]: https://github.com/fsnotify/fsnotify/pull/526
|
||||
[#528]: https://github.com/fsnotify/fsnotify/pull/528
|
||||
[#537]: https://github.com/fsnotify/fsnotify/pull/537
|
||||
[#550]: https://github.com/fsnotify/fsnotify/pull/550
|
||||
[#572]: https://github.com/fsnotify/fsnotify/pull/572
|
||||
|
||||
1.6.0 - 2022-10-13
|
||||
------------------
|
||||
This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1,
|
||||
but not documented). It also increases the minimum Linux version to 2.6.32.
|
||||
|
||||
|
||||
79
vendor/github.com/fsnotify/fsnotify/README.md
generated
vendored
@@ -1,29 +1,31 @@
|
||||
fsnotify is a Go library to provide cross-platform filesystem notifications on
|
||||
Windows, Linux, macOS, and BSD systems.
|
||||
Windows, Linux, macOS, BSD, and illumos.
|
||||
|
||||
Go 1.16 or newer is required; the full documentation is at
|
||||
Go 1.17 or newer is required; the full documentation is at
|
||||
https://pkg.go.dev/github.com/fsnotify/fsnotify
|
||||
|
||||
**It's best to read the documentation at pkg.go.dev, as it's pinned to the last
|
||||
released version, whereas this README is for the last development version which
|
||||
may include additions/changes.**
|
||||
|
||||
---
|
||||
|
||||
Platform support:
|
||||
|
||||
| Adapter | OS | Status |
|
||||
| --------------------- | ---------------| -------------------------------------------------------------|
|
||||
| inotify | Linux 2.6.32+ | Supported |
|
||||
| kqueue | BSD, macOS | Supported |
|
||||
| ReadDirectoryChangesW | Windows | Supported |
|
||||
| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
|
||||
| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) |
|
||||
| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) |
|
||||
| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
|
||||
| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
|
||||
| Backend | OS | Status |
|
||||
| :-------------------- | :--------- | :------------------------------------------------------------------------ |
|
||||
| inotify | Linux | Supported |
|
||||
| kqueue | BSD, macOS | Supported |
|
||||
| ReadDirectoryChangesW | Windows | Supported |
|
||||
| FEN | illumos | Supported |
|
||||
| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) |
|
||||
| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment |
|
||||
| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] |
|
||||
| USN Journals | Windows | [Needs support in x/sys/windows][usn] |
|
||||
| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) |
|
||||
|
||||
Linux and macOS should include Android and iOS, but these are currently untested.
|
||||
Linux and illumos should include Android and Solaris, but these are currently
|
||||
untested.
|
||||
|
||||
[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120
|
||||
[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847
|
||||
[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129
|
||||
|
||||
Usage
|
||||
-----
|
||||
@@ -83,20 +85,23 @@ run with:
|
||||
|
||||
% go run ./cmd/fsnotify
|
||||
|
||||
Further detailed documentation can be found in godoc:
|
||||
https://pkg.go.dev/github.com/fsnotify/fsnotify
|
||||
|
||||
FAQ
|
||||
---
|
||||
### Will a file still be watched when it's moved to another directory?
|
||||
No, not unless you are watching the location it was moved to.
|
||||
|
||||
### Are subdirectories watched too?
|
||||
### Are subdirectories watched?
|
||||
No, you must add watches for any directory you want to watch (a recursive
|
||||
watcher is on the roadmap: [#18]).
|
||||
|
||||
[#18]: https://github.com/fsnotify/fsnotify/issues/18
|
||||
|
||||
### Do I have to watch the Error and Event channels in a goroutine?
|
||||
As of now, yes (you can read both channels in the same goroutine using `select`,
|
||||
you don't need a separate goroutine for both channels; see the example).
|
||||
Yes. You can read both channels in the same goroutine using `select` (you don't
|
||||
need a separate goroutine for both channels; see the example).
|
||||
|
||||
### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
|
||||
fsnotify requires support from underlying OS to work. The current NFS and SMB
|
||||
@@ -107,6 +112,32 @@ This could be fixed with a polling watcher ([#9]), but it's not yet implemented.
|
||||
|
||||
[#9]: https://github.com/fsnotify/fsnotify/issues/9
|
||||
|
||||
### Why do I get many Chmod events?
|
||||
Some programs may generate a lot of attribute changes; for example Spotlight on
|
||||
macOS, anti-virus programs, backup applications, and some others are known to do
|
||||
this. As a rule, it's typically best to ignore Chmod events. They're often not
|
||||
useful, and tend to cause problems.
|
||||
|
||||
Spotlight indexing on macOS can result in multiple events (see [#15]). A
|
||||
temporary workaround is to add your folder(s) to the *Spotlight Privacy
|
||||
settings* until we have a native FSEvents implementation (see [#11]).
|
||||
|
||||
[#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
[#15]: https://github.com/fsnotify/fsnotify/issues/15
|
||||
|
||||
### Watching a file doesn't work well
|
||||
Watching individual files (rather than directories) is generally not recommended
|
||||
as many programs (especially editors) update files atomically: it will write to
|
||||
a temporary file which is then moved to to destination, overwriting the original
|
||||
(or some variant thereof). The watcher on the original file is now lost, as that
|
||||
no longer exists.
|
||||
|
||||
The upshot of this is that a power failure or crash won't leave a half-written
|
||||
file.
|
||||
|
||||
Watch the parent directory and use `Event.Name` to filter out files you're not
|
||||
interested in. There is an example of this in `cmd/fsnotify/file.go`.
|
||||
|
||||
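A minimal sketch of that pattern with placeholder paths (assumes the usual `log`, `path/filepath`, and `fmt` imports):

```go
w, err := fsnotify.NewWatcher()
if err != nil {
	log.Fatal(err)
}
defer w.Close()

// Watch the directory, not the file itself.
if err := w.Add("/etc/myapp"); err != nil {
	log.Fatal(err)
}

for event := range w.Events {
	// Only react to the one file we care about.
	if filepath.Base(event.Name) == "config.yaml" && event.Has(fsnotify.Write) {
		fmt.Println("config changed:", event)
	}
}
```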
Platform-specific notes
|
||||
-----------------------
|
||||
### Linux
|
||||
@@ -151,11 +182,3 @@ these platforms.
|
||||
|
||||
The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
|
||||
control the maximum number of open files.
|
||||
|
||||
### macOS
|
||||
Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary
|
||||
workaround is to add your folder(s) to the *Spotlight Privacy settings* until we
|
||||
have a native FSEvents implementation (see [#11]).
|
||||
|
||||
[#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
[#15]: https://github.com/fsnotify/fsnotify/issues/15
|
||||
|
||||
550
vendor/github.com/fsnotify/fsnotify/backend_fen.go
generated
vendored
@@ -1,10 +1,19 @@
|
||||
//go:build solaris
|
||||
// +build solaris
|
||||
|
||||
// Note: the documentation on the Watcher type and methods is generated from
|
||||
// mkdoc.zsh
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
@@ -17,9 +26,9 @@ import (
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
@@ -33,16 +42,16 @@ import (
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
@@ -58,14 +67,20 @@ import (
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # macOS notes
|
||||
// # Windows notes
|
||||
//
|
||||
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
|
||||
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
|
||||
// Settings" until we have a native FSEvents implementation (see [#11]).
|
||||
// Paths can be added as "C:\path\to\dir", but forward slashes
|
||||
// ("C:/path/to/dir") will also work.
|
||||
//
|
||||
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
|
||||
// When a watched directory is removed it will always send an event for the
|
||||
// directory itself, but may not send events for all files in that directory.
|
||||
// Sometimes it will send events for all files, sometimes it will send no
|
||||
// events, and often only for some files.
|
||||
//
|
||||
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
|
||||
// value that is guaranteed to work with SMB filesystems. If you have many
|
||||
// events in quick succession this may not be enough, and you will have to use
|
||||
// [WithBufferSize] to increase the value.
|
||||
type Watcher struct {
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
@@ -92,44 +107,129 @@ type Watcher struct {
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, so you
|
||||
// probably want to wait until you've stopped receiving
|
||||
// them (see the dedup example in cmd/fsnotify).
|
||||
// you may get hundreds of Write events, and you may
|
||||
// want to wait until you've stopped receiving them
|
||||
// (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// Some systems may send Write event for directories
|
||||
// when the directory content changes.
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// and on kqueue when a file is truncated. On Windows
|
||||
// it's never sent.
|
||||
// when a file is truncated. On Windows it's never
|
||||
// sent.
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
//
|
||||
// ErrEventOverflow is used to indicate there are too many events:
|
||||
//
|
||||
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
|
||||
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
|
||||
// - kqueue, fen: Not used.
|
||||
Errors chan error
|
||||
|
||||
mu sync.Mutex
|
||||
port *unix.EventPort
|
||||
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||
dirs map[string]struct{} // Explicitly watched directories
|
||||
watches map[string]struct{} // Explicitly watched non-directories
|
||||
}
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
|
||||
return NewBufferedWatcher(0)
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
|
||||
// channel.
|
||||
//
|
||||
// The main use case for this is situations with a very large number of events
|
||||
// where the kernel buffer size can't be increased (e.g. due to lack of
|
||||
// permissions). An unbuffered Watcher will perform better for almost all use
|
||||
// cases, and whenever possible you will be better off increasing the kernel
|
||||
// buffers instead of adding a large userspace buffer.
|
||||
func NewBufferedWatcher(sz uint) (*Watcher, error) {
|
||||
w := &Watcher{
|
||||
Events: make(chan Event, sz),
|
||||
Errors: make(chan error),
|
||||
dirs: make(map[string]struct{}),
|
||||
watches: make(map[string]struct{}),
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
|
||||
var err error
|
||||
w.port, err = unix.NewEventPort()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err)
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// sendEvent attempts to send an event to the user, returning true if the event
|
||||
// was put in the channel successfully and false if the watcher has been closed.
|
||||
func (w *Watcher) sendEvent(name string, op Op) (sent bool) {
|
||||
select {
|
||||
case w.Events <- Event{Name: name, Op: op}:
|
||||
return true
|
||||
case <-w.done:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// sendError attempts to send an error to the user, returning true if the error
|
||||
// was put in the channel successfully and false if the watcher has been closed.
|
||||
func (w *Watcher) sendError(err error) (sent bool) {
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
return true
|
||||
case <-w.done:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) isClosed() bool {
|
||||
select {
|
||||
case <-w.done:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the Events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
return nil
|
||||
// Take the lock used by associateFile to prevent lingering events from
|
||||
// being processed after the close
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
close(w.done)
|
||||
return w.port.Close()
|
||||
}
|
||||
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; attempting to watch it more than once will
|
||||
// return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// added. A watch will be automatically removed if the path is deleted.
|
||||
// A path can only be watched once; watching it more than once is a no-op and will
|
||||
// not return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// watched.
|
||||
//
|
||||
// A path will remain watched if it gets renamed to somewhere else on the same
|
||||
// filesystem, but the monitor will get removed if the path gets deleted and
|
||||
// re-created, or if it's moved to a different filesystem.
|
||||
// A watch will be automatically removed if the watched path is deleted or
|
||||
// renamed. The exception is the Windows backend, which doesn't remove the
|
||||
// watcher on renames.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// Returns [ErrClosed] if [Watcher.Close] was called.
|
||||
//
|
||||
// See [Watcher.AddWith] for a version that allows adding options.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
@@ -139,15 +239,63 @@ func (w *Watcher) Close() error {
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many tools update files atomically. Instead of "just" writing
|
||||
// to the file a temporary file will be written to first, and if successful the
|
||||
// temporary file is moved to to destination removing the original, or some
|
||||
// variant thereof. The watcher on the original file is now lost, as it no
|
||||
// longer exists.
|
||||
// recommended as many programs (especially editors) update files atomically: it
|
||||
// will write to a temporary file which is then moved to to destination,
|
||||
// overwriting the original (or some variant thereof). The watcher on the
|
||||
// original file is now lost, as that no longer exists.
|
||||
//
|
||||
// Instead, watch the parent directory and use Event.Name to filter out files
|
||||
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
|
||||
func (w *Watcher) Add(name string) error {
|
||||
// The upshot of this is that a power failure or crash won't leave a
|
||||
// half-written file.
|
||||
//
|
||||
// Watch the parent directory and use Event.Name to filter out files you're not
|
||||
// interested in. There is an example of this in cmd/fsnotify/file.go.
|
||||
func (w *Watcher) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
|
||||
// the defaults described below are used.
|
||||
//
|
||||
// Possible options are:
|
||||
//
|
||||
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
|
||||
// other platforms. The default is 64K (65536 bytes).
|
||||
func (w *Watcher) AddWith(name string, opts ...addOpt) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
if w.port.PathIsWatched(name) {
|
||||
return nil
|
||||
}
|
||||
|
||||
_ = getOptions(opts...)
|
||||
|
||||
// Currently we resolve symlinks that were explicitly requested to be
|
||||
// watched. Otherwise we would use LStat here.
|
||||
stat, err := os.Stat(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Associate all files in the directory.
|
||||
if stat.IsDir() {
|
||||
err := w.handleDirectory(name, stat, true, w.associateFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.dirs[name] = struct{}{}
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
err = w.associateFile(name, stat, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.watches[name] = struct{}{}
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -157,6 +305,336 @@ func (w *Watcher) Add(name string) error {
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
if !w.port.PathIsWatched(name) {
|
||||
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
|
||||
}
|
||||
|
||||
// The user has expressed an intent. Immediately remove this name from
|
||||
// whichever watch list it might be in. If it's not in there the delete
|
||||
// doesn't cause harm.
|
||||
w.mu.Lock()
|
||||
delete(w.watches, name)
|
||||
delete(w.dirs, name)
|
||||
w.mu.Unlock()
|
||||
|
||||
stat, err := os.Stat(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove associations for every file in the directory.
|
||||
if stat.IsDir() {
|
||||
err := w.handleDirectory(name, stat, false, w.dissociateFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
err = w.port.DissociatePath(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// readEvents contains the main loop that runs in a goroutine watching for events.
|
||||
func (w *Watcher) readEvents() {
|
||||
// If this function returns, the watcher has been closed and we can close
|
||||
// these channels
|
||||
defer func() {
|
||||
close(w.Errors)
|
||||
close(w.Events)
|
||||
}()
|
||||
|
||||
pevents := make([]unix.PortEvent, 8)
|
||||
for {
|
||||
count, err := w.port.Get(pevents, 1, nil)
|
||||
if err != nil && err != unix.ETIME {
|
||||
// Interrupted system call (count should be 0) ignore and continue
|
||||
if errors.Is(err, unix.EINTR) && count == 0 {
|
||||
continue
|
||||
}
|
||||
// Get failed because we called w.Close()
|
||||
if errors.Is(err, unix.EBADF) && w.isClosed() {
|
||||
return
|
||||
}
|
||||
// There was an error not caused by calling w.Close()
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
p := pevents[:count]
|
||||
for _, pevent := range p {
|
||||
if pevent.Source != unix.PORT_SOURCE_FILE {
|
||||
// Event from unexpected source received; should never happen.
|
||||
if !w.sendError(errors.New("Event from unexpected source received")) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
err = w.handleEvent(&pevent)
|
||||
if err != nil {
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
|
||||
files, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Handle all children of the directory.
|
||||
for _, entry := range files {
|
||||
finfo, err := entry.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = handler(filepath.Join(path, finfo.Name()), finfo, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// And finally handle the directory itself.
|
||||
return handler(path, stat, follow)
|
||||
}
|
||||
|
||||
// handleEvent might need to emit more than one fsnotify event if the events
|
||||
// bitmap matches more than one event type (e.g. the file was both modified and
|
||||
// had the attributes changed between when the association was created and the
|
||||
// when event was returned)
|
||||
func (w *Watcher) handleEvent(event *unix.PortEvent) error {
|
||||
var (
|
||||
events = event.Events
|
||||
path = event.Path
|
||||
fmode = event.Cookie.(os.FileMode)
|
||||
reRegister = true
|
||||
)
|
||||
|
||||
w.mu.Lock()
|
||||
_, watchedDir := w.dirs[path]
|
||||
_, watchedPath := w.watches[path]
|
||||
w.mu.Unlock()
|
||||
isWatched := watchedDir || watchedPath
|
||||
|
||||
if events&unix.FILE_DELETE != 0 {
|
||||
if !w.sendEvent(path, Remove) {
|
||||
return nil
|
||||
}
|
||||
reRegister = false
|
||||
}
|
||||
if events&unix.FILE_RENAME_FROM != 0 {
|
||||
if !w.sendEvent(path, Rename) {
|
||||
return nil
|
||||
}
|
||||
// Don't keep watching the new file name
|
||||
reRegister = false
|
||||
}
|
||||
if events&unix.FILE_RENAME_TO != 0 {
|
||||
// We don't report a Rename event for this case, because Rename events
|
||||
// are interpreted as referring to the _old_ name of the file, and in
|
||||
// this case the event would refer to the new name of the file. This
|
||||
// type of rename event is not supported by fsnotify.
|
||||
|
||||
// inotify reports a Remove event in this case, so we simulate this
|
||||
// here.
|
||||
if !w.sendEvent(path, Remove) {
|
||||
return nil
|
||||
}
|
||||
// Don't keep watching the file that was removed
|
||||
reRegister = false
|
||||
}
|
||||
|
||||
// The file is gone, nothing left to do.
|
||||
if !reRegister {
|
||||
if watchedDir {
|
||||
w.mu.Lock()
|
||||
delete(w.dirs, path)
|
||||
w.mu.Unlock()
|
||||
}
|
||||
if watchedPath {
|
||||
w.mu.Lock()
|
||||
delete(w.watches, path)
|
||||
w.mu.Unlock()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// If we didn't get a deletion the file still exists and we're going to have
|
||||
// to watch it again. Let's Stat it now so that we can compare permissions
|
||||
// and have what we need to continue watching the file
|
||||
|
||||
stat, err := os.Lstat(path)
|
||||
if err != nil {
|
||||
// This is unexpected, but we should still emit an event. This happens
|
||||
// most often on "rm -r" of a subdirectory inside a watched directory We
|
||||
// get a modify event of something happening inside, but by the time we
|
||||
// get here, the subdirectory is already gone. Clearly we were watching
|
||||
// this path but now it is gone. Let's tell the user that it was
|
||||
// removed.
|
||||
if !w.sendEvent(path, Remove) {
|
||||
return nil
|
||||
}
|
||||
// Suppress extra write events on removed directories; they are not
|
||||
// informative and can be confusing.
|
||||
return nil
|
||||
}
|
||||
|
||||
// resolve symlinks that were explicitly watched as we would have at Add()
|
||||
// time. this helps suppress spurious Chmod events on watched symlinks
|
||||
if isWatched {
|
||||
stat, err = os.Stat(path)
|
||||
if err != nil {
|
||||
// The symlink still exists, but the target is gone. Report the
|
||||
// Remove similar to above.
|
||||
if !w.sendEvent(path, Remove) {
|
||||
return nil
|
||||
}
|
||||
// Don't return the error
|
||||
}
|
||||
}
|
||||
|
||||
if events&unix.FILE_MODIFIED != 0 {
|
||||
if fmode.IsDir() {
|
||||
if watchedDir {
|
||||
if err := w.updateDirectory(path); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if !w.sendEvent(path, Write) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if !w.sendEvent(path, Write) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if events&unix.FILE_ATTRIB != 0 && stat != nil {
|
||||
// Only send Chmod if perms changed
|
||||
if stat.Mode().Perm() != fmode.Perm() {
|
||||
if !w.sendEvent(path, Chmod) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if stat != nil {
|
||||
// If we get here, it means we've hit an event above that requires us to
|
||||
// continue watching the file or directory
|
||||
return w.associateFile(path, stat, isWatched)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Watcher) updateDirectory(path string) error {
|
||||
// The directory was modified, so we must find unwatched entities and watch
|
||||
// them. If something was removed from the directory, nothing will happen,
|
||||
// as everything else should still be watched.
|
||||
files, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, entry := range files {
|
||||
path := filepath.Join(path, entry.Name())
|
||||
if w.port.PathIsWatched(path) {
|
||||
continue
|
||||
}
|
||||
|
||||
finfo, err := entry.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = w.associateFile(path, finfo, false)
|
||||
if err != nil {
|
||||
if !w.sendError(err) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if !w.sendEvent(path, Create) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
// This is primarily protecting the call to AssociatePath but it is
|
||||
// important and intentional that the call to PathIsWatched is also
|
||||
// protected by this mutex. Without this mutex, AssociatePath has been seen
|
||||
// to error out that the path is already associated.
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
if w.port.PathIsWatched(path) {
|
||||
// Remove the old association in favor of this one If we get ENOENT,
|
||||
// then while the x/sys/unix wrapper still thought that this path was
|
||||
// associated, the underlying event port did not. This call will have
|
||||
// cleared up that discrepancy. The most likely cause is that the event
|
||||
// has fired but we haven't processed it yet.
|
||||
err := w.port.DissociatePath(path)
|
||||
if err != nil && err != unix.ENOENT {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// FILE_NOFOLLOW means we watch symlinks themselves rather than their
|
||||
// targets.
|
||||
events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW
|
||||
if follow {
|
||||
// We *DO* follow symlinks for explicitly watched entries.
|
||||
events = unix.FILE_MODIFIED | unix.FILE_ATTRIB
|
||||
}
|
||||
return w.port.AssociatePath(path, stat,
|
||||
events,
|
||||
stat.Mode())
|
||||
}
|
||||
|
||||
func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error {
|
||||
if !w.port.PathIsWatched(path) {
|
||||
return nil
|
||||
}
|
||||
return w.port.DissociatePath(path)
|
||||
}
|
||||
|
||||
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
|
||||
// yet removed).
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) WatchList() []string {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
entries := make([]string, 0, len(w.watches)+len(w.dirs))
|
||||
for pathname := range w.dirs {
|
||||
entries = append(entries, pathname)
|
||||
}
|
||||
for pathname := range w.watches {
|
||||
entries = append(entries, pathname)
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
381
vendor/github.com/fsnotify/fsnotify/backend_inotify.go
generated
vendored
@@ -1,5 +1,8 @@
|
||||
//go:build linux
|
||||
// +build linux
|
||||
//go:build linux && !appengine
|
||||
// +build linux,!appengine
|
||||
|
||||
// Note: the documentation on the Watcher type and methods is generated from
|
||||
// mkdoc.zsh
|
||||
|
||||
package fsnotify
|
||||
|
||||
@@ -26,9 +29,9 @@ import (
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
@@ -42,16 +45,16 @@ import (
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
@@ -67,14 +70,20 @@ import (
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # macOS notes
|
||||
// # Windows notes
|
||||
//
|
||||
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
|
||||
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
|
||||
// Settings" until we have a native FSEvents implementation (see [#11]).
|
||||
// Paths can be added as "C:\path\to\dir", but forward slashes
|
||||
// ("C:/path/to/dir") will also work.
|
||||
//
|
||||
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
|
||||
// When a watched directory is removed it will always send an event for the
|
||||
// directory itself, but may not send events for all files in that directory.
|
||||
// Sometimes it will send events for all files, sometimes it will send no
|
||||
// events, and often only for some files.
|
||||
//
|
||||
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
|
||||
// value that is guaranteed to work with SMB filesystems. If you have many
|
||||
// events in quick succession this may not be enough, and you will have to use
|
||||
// [WithBufferSize] to increase the value.
|
||||
type Watcher struct {
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
@@ -101,36 +110,148 @@ type Watcher struct {
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, so you
|
||||
// probably want to wait until you've stopped receiving
|
||||
// them (see the dedup example in cmd/fsnotify).
|
||||
// you may get hundreds of Write events, and you may
|
||||
// want to wait until you've stopped receiving them
|
||||
// (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// Some systems may send Write event for directories
|
||||
// when the directory content changes.
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// and on kqueue when a file is truncated. On Windows
|
||||
// it's never sent.
|
||||
// when a file is truncated. On Windows it's never
|
||||
// sent.
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
//
|
||||
// ErrEventOverflow is used to indicate there are too many events:
|
||||
//
|
||||
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
|
||||
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
|
||||
// - kqueue, fen: Not used.
|
||||
Errors chan error
|
||||
|
||||
// Store fd here as os.File.Read() will no longer return on close after
|
||||
// calling Fd(). See: https://github.com/golang/go/issues/26439
|
||||
fd int
|
||||
mu sync.Mutex // Map access
|
||||
inotifyFile *os.File
|
||||
watches map[string]*watch // Map of inotify watches (key: path)
|
||||
paths map[int]string // Map of watched paths (key: watch descriptor)
|
||||
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||
doneResp chan struct{} // Channel to respond to Close
|
||||
watches *watches
|
||||
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||
closeMu sync.Mutex
|
||||
doneResp chan struct{} // Channel to respond to Close
|
||||
}
|
||||
|
||||
type (
|
||||
watches struct {
|
||||
mu sync.RWMutex
|
||||
wd map[uint32]*watch // wd → watch
|
||||
path map[string]uint32 // pathname → wd
|
||||
}
|
||||
watch struct {
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
path string // Watch path.
|
||||
}
|
||||
)
|
||||
|
||||
func newWatches() *watches {
|
||||
return &watches{
|
||||
wd: make(map[uint32]*watch),
|
||||
path: make(map[string]uint32),
|
||||
}
|
||||
}
|
||||
|
||||
func (w *watches) len() int {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
return len(w.wd)
|
||||
}
|
||||
|
||||
func (w *watches) add(ww *watch) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
w.wd[ww.wd] = ww
|
||||
w.path[ww.path] = ww.wd
|
||||
}
|
||||
|
||||
func (w *watches) remove(wd uint32) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
delete(w.path, w.wd[wd].path)
|
||||
delete(w.wd, wd)
|
||||
}
|
||||
|
||||
func (w *watches) removePath(path string) (uint32, bool) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
wd, ok := w.path[path]
|
||||
if !ok {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
delete(w.path, path)
|
||||
delete(w.wd, wd)
|
||||
|
||||
return wd, true
|
||||
}
|
||||
|
||||
func (w *watches) byPath(path string) *watch {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
return w.wd[w.path[path]]
|
||||
}
|
||||
|
||||
func (w *watches) byWd(wd uint32) *watch {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
return w.wd[wd]
|
||||
}
|
||||
|
||||
func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
var existing *watch
|
||||
wd, ok := w.path[path]
|
||||
if ok {
|
||||
existing = w.wd[wd]
|
||||
}
|
||||
|
||||
upd, err := f(existing)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if upd != nil {
|
||||
w.wd[upd.wd] = upd
|
||||
w.path[upd.path] = upd.wd
|
||||
|
||||
if upd.wd != wd {
|
||||
delete(w.wd, wd)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
// Create inotify fd
|
||||
// Need to set the FD to nonblocking mode in order for SetDeadline methods to work
|
||||
// Otherwise, blocking i/o operations won't terminate on close
|
||||
return NewBufferedWatcher(0)
|
||||
}
|
||||
|
||||
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
|
||||
// channel.
|
||||
//
|
||||
// The main use case for this is situations with a very large number of events
|
||||
// where the kernel buffer size can't be increased (e.g. due to lack of
|
||||
// permissions). An unbuffered Watcher will perform better for almost all use
|
||||
// cases, and whenever possible you will be better off increasing the kernel
|
||||
// buffers instead of adding a large userspace buffer.
|
||||
func NewBufferedWatcher(sz uint) (*Watcher, error) {
|
||||
// Need to set nonblocking mode for SetDeadline to work, otherwise blocking
|
||||
// I/O operations won't terminate on close.
|
||||
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
|
||||
if fd == -1 {
|
||||
return nil, errno
|
||||
@@ -139,9 +260,8 @@ func NewWatcher() (*Watcher, error) {
|
||||
w := &Watcher{
|
||||
fd: fd,
|
||||
inotifyFile: os.NewFile(uintptr(fd), ""),
|
||||
watches: make(map[string]*watch),
|
||||
paths: make(map[int]string),
|
||||
Events: make(chan Event),
|
||||
watches: newWatches(),
|
||||
Events: make(chan Event, sz),
|
||||
Errors: make(chan error),
|
||||
done: make(chan struct{}),
|
||||
doneResp: make(chan struct{}),
|
||||
@@ -157,8 +277,8 @@ func (w *Watcher) sendEvent(e Event) bool {
|
||||
case w.Events <- e:
|
||||
return true
|
||||
case <-w.done:
|
||||
return false
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Returns true if the error was sent, or false if watcher is closed.
|
||||
@@ -180,17 +300,15 @@ func (w *Watcher) isClosed() bool {
|
||||
}
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
// Close removes all watches and closes the Events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
w.mu.Lock()
|
||||
w.closeMu.Lock()
|
||||
if w.isClosed() {
|
||||
w.mu.Unlock()
|
||||
w.closeMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send 'close' signal to goroutine, and set the Watcher to closed.
|
||||
close(w.done)
|
||||
w.mu.Unlock()
|
||||
w.closeMu.Unlock()
|
||||
|
||||
// Causes any blocking reads to return with an error, provided the file
|
||||
// still supports deadline operations.
|
||||
@@ -207,17 +325,21 @@ func (w *Watcher) Close() error {
|
||||
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; attempting to watch it more than once will
|
||||
// return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// added. A watch will be automatically removed if the path is deleted.
|
||||
// A path can only be watched once; watching it more than once is a no-op and will
|
||||
// not return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// watched.
|
||||
//
|
||||
// A path will remain watched if it gets renamed to somewhere else on the same
|
||||
// filesystem, but the monitor will get removed if the path gets deleted and
|
||||
// re-created, or if it's moved to a different filesystem.
|
||||
// A watch will be automatically removed if the watched path is deleted or
|
||||
// renamed. The exception is the Windows backend, which doesn't remove the
|
||||
// watcher on renames.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// Returns [ErrClosed] if [Watcher.Close] was called.
|
||||
//
|
||||
// See [Watcher.AddWith] for a version that allows adding options.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
@@ -227,44 +349,59 @@ func (w *Watcher) Close() error {
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many tools update files atomically. Instead of "just" writing
// to the file a temporary file will be written to first, and if successful the
// temporary file is moved to the destination removing the original, or some
// variant thereof. The watcher on the original file is now lost, as it no
// longer exists.
// recommended as many programs (especially editors) update files atomically: it
// will write to a temporary file which is then moved to the destination,
// overwriting the original (or some variant thereof). The watcher on the
// original file is now lost, as that no longer exists.
//
// Instead, watch the parent directory and use Event.Name to filter out files
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
func (w *Watcher) Add(name string) error {
	name = filepath.Clean(name)
// The upshot of this is that a power failure or crash won't leave a
// half-written file.
//
// Watch the parent directory and use Event.Name to filter out files you're not
// interested in. There is an example of this in cmd/fsnotify/file.go.
func (w *Watcher) Add(name string) error { return w.AddWith(name) }
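A sketch of the advice above (illustrative, not from this diff; the config path is a placeholder and the snippet assumes imports of path/filepath and github.com/fsnotify/fsnotify):

// watchConfig watches the parent directory of one file and filters events by
// Event.Name, so an atomic replace of the file does not orphan the watch.
func watchConfig(target string, reload func()) error {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	defer w.Close()

	// Watch the directory, not the file itself.
	if err := w.Add(filepath.Dir(target)); err != nil {
		return err
	}

	for ev := range w.Events {
		if ev.Name != target {
			continue // another file in the same directory; ignore
		}
		if ev.Has(fsnotify.Write) || ev.Has(fsnotify.Create) {
			reload()
		}
	}
	return nil
}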
|
||||
|
||||
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
|
||||
// the defaults described below are used.
|
||||
//
|
||||
// Possible options are:
|
||||
//
|
||||
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
|
||||
// other platforms. The default is 64K (65536 bytes).
|
||||
func (w *Watcher) AddWith(name string, opts ...addOpt) error {
|
||||
if w.isClosed() {
|
||||
return errors.New("inotify instance already closed")
|
||||
return ErrClosed
|
||||
}
|
||||
|
||||
name = filepath.Clean(name)
|
||||
_ = getOptions(opts...)
|
||||
|
||||
var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
|
||||
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
|
||||
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
watchEntry := w.watches[name]
|
||||
if watchEntry != nil {
|
||||
flags |= watchEntry.flags | unix.IN_MASK_ADD
|
||||
}
|
||||
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
|
||||
if wd == -1 {
|
||||
return errno
|
||||
}
|
||||
return w.watches.updatePath(name, func(existing *watch) (*watch, error) {
|
||||
if existing != nil {
|
||||
flags |= existing.flags | unix.IN_MASK_ADD
|
||||
}
|
||||
|
||||
if watchEntry == nil {
|
||||
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
|
||||
w.paths[wd] = name
|
||||
} else {
|
||||
watchEntry.wd = uint32(wd)
|
||||
watchEntry.flags = flags
|
||||
}
|
||||
wd, err := unix.InotifyAddWatch(w.fd, name, flags)
|
||||
if wd == -1 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil
|
||||
if existing == nil {
|
||||
return &watch{
|
||||
wd: uint32(wd),
|
||||
path: name,
|
||||
flags: flags,
|
||||
}, nil
|
||||
}
|
||||
|
||||
existing.wd = uint32(wd)
|
||||
existing.flags = flags
|
||||
return existing, nil
|
||||
})
|
||||
}
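For reference, a hedged sketch of the AddWith call described above (path and size are placeholders, and it assumes the log package); on this inotify backend WithBufferSize is accepted but has no effect, it only matters for the Windows backend:

// Placeholder path; WithBufferSize only affects the Windows backend.
if err := w.AddWith(`C:\data\incoming`, fsnotify.WithBufferSize(256*1024)); err != nil {
	log.Fatal(err)
}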
|
||||
|
||||
// Remove stops monitoring the path for changes.
|
||||
@@ -273,32 +410,22 @@ func (w *Watcher) Add(name string) error {
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
return w.remove(filepath.Clean(name))
|
||||
}
|
||||
|
||||
// Fetch the watch.
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
watch, ok := w.watches[name]
|
||||
|
||||
// Remove it from inotify.
|
||||
func (w *Watcher) remove(name string) error {
|
||||
wd, ok := w.watches.removePath(name)
|
||||
if !ok {
|
||||
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
|
||||
}
|
||||
|
||||
// We successfully removed the watch if InotifyRmWatch doesn't return an
|
||||
// error, we need to clean up our internal state to ensure it matches
|
||||
// inotify's kernel state.
|
||||
delete(w.paths, int(watch.wd))
|
||||
delete(w.watches, name)
|
||||
|
||||
// inotify_rm_watch will return EINVAL if the file has been deleted;
|
||||
// the inotify will already have been removed.
|
||||
// watches and pathes are deleted in ignoreLinux() implicitly and asynchronously
|
||||
// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
|
||||
// so that EINVAL means that the wd is being rm_watch()ed or its file removed
|
||||
// by another thread and we have not received IN_IGNORE event.
|
||||
success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
|
||||
success, errno := unix.InotifyRmWatch(w.fd, wd)
|
||||
if success == -1 {
|
||||
// TODO: Perhaps it's not helpful to return an error here in every case;
|
||||
// The only two possible errors are:
|
||||
@@ -312,26 +439,26 @@ func (w *Watcher) Remove(name string) error {
|
||||
// are watching is deleted.
|
||||
return errno
|
||||
}
|
||||
|
||||
return nil
|
||||
}
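Because watches are not recursive (see the Remove documentation above), each added path has to be removed individually; a small sketch with placeholder paths, assuming the errors and log packages:

for _, p := range []string{"/tmp/dir", "/tmp/dir/subdir"} {
	// ErrNonExistentWatch just means the path was never added (or was
	// already cleaned up automatically after a delete).
	if err := w.Remove(p); err != nil && !errors.Is(err, fsnotify.ErrNonExistentWatch) {
		log.Println("remove:", err)
	}
}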
|
||||
|
||||
// WatchList returns all paths added with [Add] (and are not yet removed).
|
||||
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
|
||||
// yet removed).
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) WatchList() []string {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
entries := make([]string, 0, len(w.watches))
|
||||
for pathname := range w.watches {
|
||||
entries = append(entries, pathname)
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
entries := make([]string, 0, w.watches.len())
|
||||
w.watches.mu.RLock()
|
||||
for pathname := range w.watches.path {
|
||||
entries = append(entries, pathname)
|
||||
}
|
||||
w.watches.mu.RUnlock()
|
||||
|
||||
type watch struct {
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
return entries
|
||||
}
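An illustrative use of WatchList (a sketch, not from this diff): carrying the current watch set over to a fresh Watcher, for example after an unrecoverable error.

// migrateWatches re-adds everything watched on old to a new Watcher.
// Illustrative helper; not part of fsnotify.
func migrateWatches(old *fsnotify.Watcher) (*fsnotify.Watcher, error) {
	paths := old.WatchList() // nil if old was already closed
	_ = old.Close()

	fresh, err := fsnotify.NewWatcher()
	if err != nil {
		return nil, err
	}
	for _, p := range paths {
		if err := fresh.Add(p); err != nil {
			fresh.Close()
			return nil, err
		}
	}
	return fresh, nil
}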
|
||||
|
||||
// readEvents reads from the inotify file descriptor, converts the
|
||||
@@ -367,14 +494,11 @@ func (w *Watcher) readEvents() {
|
||||
if n < unix.SizeofInotifyEvent {
|
||||
var err error
|
||||
if n == 0 {
|
||||
// If EOF is received. This should really never happen.
|
||||
err = io.EOF
|
||||
err = io.EOF // If EOF is received. This should really never happen.
|
||||
} else if n < 0 {
|
||||
// If an error occurred while reading.
|
||||
err = errno
|
||||
err = errno // If an error occurred while reading.
|
||||
} else {
|
||||
// Read was too short.
|
||||
err = errors.New("notify: short read in readEvents()")
|
||||
err = errors.New("notify: short read in readEvents()") // Read was too short.
|
||||
}
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
@@ -403,18 +527,29 @@ func (w *Watcher) readEvents() {
|
||||
// doesn't append the filename to the event, but we would like to always fill the
|
||||
// the "Name" field with a valid filename. We retrieve the path of the watch from
|
||||
// the "paths" map.
|
||||
w.mu.Lock()
|
||||
name, ok := w.paths[int(raw.Wd)]
|
||||
// IN_DELETE_SELF occurs when the file/directory being watched is removed.
|
||||
// This is a sign to clean up the maps, otherwise we are no longer in sync
|
||||
// with the inotify kernel state which has already deleted the watch
|
||||
// automatically.
|
||||
if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
|
||||
delete(w.paths, int(raw.Wd))
|
||||
delete(w.watches, name)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
watch := w.watches.byWd(uint32(raw.Wd))
|
||||
|
||||
// inotify will automatically remove the watch on deletes; just need
|
||||
// to clean our state here.
|
||||
if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
|
||||
w.watches.remove(watch.wd)
|
||||
}
|
||||
// We can't really update the state when a watched path is moved;
|
||||
// only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove
|
||||
// the watch.
|
||||
if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
|
||||
err := w.remove(watch.path)
|
||||
if err != nil && !errors.Is(err, ErrNonExistentWatch) {
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var name string
|
||||
if watch != nil {
|
||||
name = watch.path
|
||||
}
|
||||
if nameLen > 0 {
|
||||
// Point "bytes" at the first byte of the filename
|
||||
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
|
||||
|
||||
293 vendor/github.com/fsnotify/fsnotify/backend_kqueue.go generated vendored
@@ -1,12 +1,14 @@
|
||||
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
|
||||
// +build freebsd openbsd netbsd dragonfly darwin
|
||||
|
||||
// Note: the documentation on the Watcher type and methods is generated from
|
||||
// mkdoc.zsh
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
@@ -24,9 +26,9 @@ import (
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
@@ -40,16 +42,16 @@ import (
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
@@ -65,14 +67,20 @@ import (
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # macOS notes
|
||||
// # Windows notes
|
||||
//
|
||||
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
|
||||
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
|
||||
// Settings" until we have a native FSEvents implementation (see [#11]).
|
||||
// Paths can be added as "C:\path\to\dir", but forward slashes
|
||||
// ("C:/path/to/dir") will also work.
|
||||
//
|
||||
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
|
||||
// When a watched directory is removed it will always send an event for the
|
||||
// directory itself, but may not send events for all files in that directory.
|
||||
// Sometimes it will send events for all files, sometimes it will send no
|
||||
// events, and often only for some files.
|
||||
//
|
||||
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
|
||||
// value that is guaranteed to work with SMB filesystems. If you have many
|
||||
// events in quick succession this may not be enough, and you will have to use
|
||||
// [WithBufferSize] to increase the value.
|
||||
type Watcher struct {
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
@@ -99,18 +107,27 @@ type Watcher struct {
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, so you
|
||||
// probably want to wait until you've stopped receiving
|
||||
// them (see the dedup example in cmd/fsnotify).
|
||||
// you may get hundreds of Write events, and you may
|
||||
// want to wait until you've stopped receiving them
|
||||
// (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// Some systems may send a Write event for directories
|
||||
// when the directory content changes.
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// and on kqueue when a file is truncated. On Windows
|
||||
// it's never sent.
|
||||
// when a file is truncated. On Windows it's never
|
||||
// sent.
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
//
|
||||
// ErrEventOverflow is used to indicate there are too many events:
|
||||
//
|
||||
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
|
||||
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
|
||||
// - kqueue, fen: Not used.
|
||||
Errors chan error
|
||||
|
||||
done chan struct{}
|
||||
@@ -133,6 +150,18 @@ type pathInfo struct {
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
return NewBufferedWatcher(0)
|
||||
}
|
||||
|
||||
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
|
||||
// channel.
|
||||
//
|
||||
// The main use case for this is situations with a very large number of events
|
||||
// where the kernel buffer size can't be increased (e.g. due to lack of
|
||||
// permissions). An unbuffered Watcher will perform better for almost all use
|
||||
// cases, and whenever possible you will be better off increasing the kernel
|
||||
// buffers instead of adding a large userspace buffer.
|
||||
func NewBufferedWatcher(sz uint) (*Watcher, error) {
|
||||
kq, closepipe, err := newKqueue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -147,7 +176,7 @@ func NewWatcher() (*Watcher, error) {
|
||||
paths: make(map[int]pathInfo),
|
||||
fileExists: make(map[string]struct{}),
|
||||
userWatches: make(map[string]struct{}),
|
||||
Events: make(chan Event),
|
||||
Events: make(chan Event, sz),
|
||||
Errors: make(chan error),
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
@@ -197,8 +226,8 @@ func (w *Watcher) sendEvent(e Event) bool {
|
||||
case w.Events <- e:
|
||||
return true
|
||||
case <-w.done:
|
||||
return false
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Returns true if the error was sent, or false if watcher is closed.
|
||||
@@ -207,11 +236,11 @@ func (w *Watcher) sendError(err error) bool {
|
||||
case w.Errors <- err:
|
||||
return true
|
||||
case <-w.done:
|
||||
return false
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
// Close removes all watches and closes the Events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
@@ -239,17 +268,21 @@ func (w *Watcher) Close() error {
|
||||
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; attempting to watch it more than once will
|
||||
// return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// added. A watch will be automatically removed if the path is deleted.
|
||||
// A path can only be watched once; watching it more than once is a no-op and will
|
||||
// not return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// watched.
|
||||
//
|
||||
// A path will remain watched if it gets renamed to somewhere else on the same
|
||||
// filesystem, but the monitor will get removed if the path gets deleted and
|
||||
// re-created, or if it's moved to a different filesystem.
|
||||
// A watch will be automatically removed if the watched path is deleted or
|
||||
// renamed. The exception is the Windows backend, which doesn't remove the
|
||||
// watcher on renames.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// Returns [ErrClosed] if [Watcher.Close] was called.
|
||||
//
|
||||
// See [Watcher.AddWith] for a version that allows adding options.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
@@ -259,15 +292,28 @@ func (w *Watcher) Close() error {
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many tools update files atomically. Instead of "just" writing
|
||||
// to the file a temporary file will be written to first, and if successful the
|
||||
// temporary file is moved to to destination removing the original, or some
|
||||
// variant thereof. The watcher on the original file is now lost, as it no
|
||||
// longer exists.
|
||||
// recommended as many programs (especially editors) update files atomically: it
|
||||
// will write to a temporary file which is then moved to the destination,
|
||||
// overwriting the original (or some variant thereof). The watcher on the
|
||||
// original file is now lost, as that no longer exists.
|
||||
//
|
||||
// Instead, watch the parent directory and use Event.Name to filter out files
|
||||
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
|
||||
func (w *Watcher) Add(name string) error {
|
||||
// The upshot of this is that a power failure or crash won't leave a
|
||||
// half-written file.
|
||||
//
|
||||
// Watch the parent directory and use Event.Name to filter out files you're not
|
||||
// interested in. There is an example of this in cmd/fsnotify/file.go.
|
||||
func (w *Watcher) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
|
||||
// the defaults described below are used.
|
||||
//
|
||||
// Possible options are:
|
||||
//
|
||||
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
|
||||
// other platforms. The default is 64K (65536 bytes).
|
||||
func (w *Watcher) AddWith(name string, opts ...addOpt) error {
|
||||
_ = getOptions(opts...)
|
||||
|
||||
w.mu.Lock()
|
||||
w.userWatches[name] = struct{}{}
|
||||
w.mu.Unlock()
|
||||
@@ -281,9 +327,19 @@ func (w *Watcher) Add(name string) error {
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
return w.remove(name, true)
|
||||
}
|
||||
|
||||
func (w *Watcher) remove(name string, unwatchFiles bool) error {
|
||||
name = filepath.Clean(name)
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
watchfd, ok := w.watches[name]
|
||||
w.mu.Unlock()
|
||||
if !ok {
|
||||
@@ -315,7 +371,7 @@ func (w *Watcher) Remove(name string) error {
|
||||
w.mu.Unlock()
|
||||
|
||||
// Find all watched paths that are in this directory that are not external.
|
||||
if isDir {
|
||||
if unwatchFiles && isDir {
|
||||
var pathsToRemove []string
|
||||
w.mu.Lock()
|
||||
for fd := range w.watchesByDir[name] {
|
||||
@@ -326,20 +382,25 @@ func (w *Watcher) Remove(name string) error {
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, name := range pathsToRemove {
|
||||
// Since these are internal, not much sense in propagating error
|
||||
// to the user, as that will just confuse them with an error about
|
||||
// a path they did not explicitly watch themselves.
|
||||
// Since these are internal, not much sense in propagating error to
|
||||
// the user, as that will just confuse them with an error about a
|
||||
// path they did not explicitly watch themselves.
|
||||
w.Remove(name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WatchList returns all paths added with [Add] (and are not yet removed).
|
||||
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
|
||||
// yet removed).
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) WatchList() []string {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
if w.isClosed {
|
||||
return nil
|
||||
}
|
||||
|
||||
entries := make([]string, 0, len(w.userWatches))
|
||||
for pathname := range w.userWatches {
|
||||
@@ -352,18 +413,18 @@ func (w *Watcher) WatchList() []string {
|
||||
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
|
||||
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
|
||||
|
||||
// addWatch adds name to the watched file set.
|
||||
// The flags are interpreted as described in kevent(2).
|
||||
// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
|
||||
// addWatch adds name to the watched file set; the flags are interpreted as
|
||||
// described in kevent(2).
|
||||
//
|
||||
// Returns the real path to the file which was added, with symlinks resolved.
|
||||
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
|
||||
var isDir bool
|
||||
// Make ./name and name equivalent
|
||||
name = filepath.Clean(name)
|
||||
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return "", errors.New("kevent instance already closed")
|
||||
return "", ErrClosed
|
||||
}
|
||||
watchfd, alreadyWatching := w.watches[name]
|
||||
// We already have a watch, but we can still override flags.
|
||||
@@ -383,27 +444,30 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Follow Symlinks
|
||||
//
|
||||
// Linux can add unresolvable symlinks to the watch list without issue,
|
||||
// and Windows can't do symlinks period. To maintain consistency, we
|
||||
// will act like everything is fine if the link can't be resolved.
|
||||
// There will simply be no file events for broken symlinks. Hence the
|
||||
// returns of nil on errors.
|
||||
// Follow Symlinks.
|
||||
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||
name, err = filepath.EvalSymlinks(name)
|
||||
link, err := os.Readlink(name)
|
||||
if err != nil {
|
||||
// Return nil because Linux can add unresolvable symlinks to the
|
||||
// watch list without problems, so maintain consistency with
|
||||
// that. There will be no file events for broken symlinks.
|
||||
// TODO: more specific check; returns os.PathError; ENOENT?
|
||||
return "", nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
_, alreadyWatching = w.watches[name]
|
||||
_, alreadyWatching = w.watches[link]
|
||||
w.mu.Unlock()
|
||||
|
||||
if alreadyWatching {
|
||||
return name, nil
|
||||
// Add to watches so we don't get spurious Create events later
|
||||
// on when we diff the directories.
|
||||
w.watches[name] = 0
|
||||
w.fileExists[name] = struct{}{}
|
||||
return link, nil
|
||||
}
|
||||
|
||||
name = link
|
||||
fi, err = os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
@@ -411,7 +475,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
|
||||
}
|
||||
|
||||
// Retry on EINTR; open() can return EINTR in practice on macOS.
|
||||
// See #354, and go issues 11180 and 39237.
|
||||
// See #354, and Go issues 11180 and 39237.
|
||||
for {
|
||||
watchfd, err = unix.Open(name, openMode, 0)
|
||||
if err == nil {
|
||||
@@ -444,14 +508,13 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
|
||||
w.watchesByDir[parentName] = watchesByDir
|
||||
}
|
||||
watchesByDir[watchfd] = struct{}{}
|
||||
|
||||
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
if isDir {
|
||||
// Watch the directory if it has not been watched before,
|
||||
// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
|
||||
// Watch the directory if it has not been watched before, or if it was
|
||||
// watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
|
||||
w.mu.Lock()
|
||||
|
||||
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
|
||||
@@ -473,13 +536,10 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
|
||||
// Event values that it sends down the Events channel.
|
||||
func (w *Watcher) readEvents() {
|
||||
defer func() {
|
||||
err := unix.Close(w.kq)
|
||||
if err != nil {
|
||||
w.Errors <- err
|
||||
}
|
||||
unix.Close(w.closepipe[0])
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
_ = unix.Close(w.kq)
|
||||
unix.Close(w.closepipe[0])
|
||||
}()
|
||||
|
||||
eventBuffer := make([]unix.Kevent_t, 10)
|
||||
@@ -513,18 +573,8 @@ func (w *Watcher) readEvents() {
|
||||
|
||||
event := w.newEvent(path.name, mask)
|
||||
|
||||
if path.isDir && !event.Has(Remove) {
|
||||
// Double check to make sure the directory exists. This can
|
||||
// happen when we do a rm -fr on a recursively watched folders
|
||||
// and we receive a modification event first but the folder has
|
||||
// been deleted and later receive the delete event.
|
||||
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
|
||||
event.Op |= Remove
|
||||
}
|
||||
}
|
||||
|
||||
if event.Has(Rename) || event.Has(Remove) {
|
||||
w.Remove(event.Name)
|
||||
w.remove(event.Name, false)
|
||||
w.mu.Lock()
|
||||
delete(w.fileExists, event.Name)
|
||||
w.mu.Unlock()
|
||||
@@ -540,26 +590,30 @@ func (w *Watcher) readEvents() {
|
||||
}
|
||||
|
||||
if event.Has(Remove) {
|
||||
// Look for a file that may have overwritten this.
|
||||
// For example, mv f1 f2 will delete f2, then create f2.
|
||||
// Look for a file that may have overwritten this; for example,
|
||||
// mv f1 f2 will delete f2, then create f2.
|
||||
if path.isDir {
|
||||
fileDir := filepath.Clean(event.Name)
|
||||
w.mu.Lock()
|
||||
_, found := w.watches[fileDir]
|
||||
w.mu.Unlock()
|
||||
if found {
|
||||
// make sure the directory exists before we watch for changes. When we
|
||||
// do a recursive watch and perform rm -fr, the parent directory might
|
||||
// have gone missing, ignore the missing directory and let the
|
||||
// upcoming delete event remove the watch from the parent directory.
|
||||
if _, err := os.Lstat(fileDir); err == nil {
|
||||
w.sendDirectoryChangeEvents(fileDir)
|
||||
err := w.sendDirectoryChangeEvents(fileDir)
|
||||
if err != nil {
|
||||
if !w.sendError(err) {
|
||||
closed = true
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
filePath := filepath.Clean(event.Name)
|
||||
if fileInfo, err := os.Lstat(filePath); err == nil {
|
||||
w.sendFileCreatedEventIfNew(filePath, fileInfo)
|
||||
if fi, err := os.Lstat(filePath); err == nil {
|
||||
err := w.sendFileCreatedEventIfNew(filePath, fi)
|
||||
if err != nil {
|
||||
if !w.sendError(err) {
|
||||
closed = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -582,21 +636,31 @@ func (w *Watcher) newEvent(name string, mask uint32) Event {
|
||||
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
// No point sending a write and delete event at the same time: if it's gone,
|
||||
// then it's gone.
|
||||
if e.Op.Has(Write) && e.Op.Has(Remove) {
|
||||
e.Op &^= Write
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
|
||||
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dirPath)
|
||||
files, err := os.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, fileInfo := range files {
|
||||
path := filepath.Join(dirPath, fileInfo.Name())
|
||||
for _, f := range files {
|
||||
path := filepath.Join(dirPath, f.Name())
|
||||
|
||||
cleanPath, err := w.internalWatch(path, fileInfo)
|
||||
fi, err := f.Info()
|
||||
if err != nil {
|
||||
return fmt.Errorf("%q: %w", path, err)
|
||||
}
|
||||
|
||||
cleanPath, err := w.internalWatch(path, fi)
|
||||
if err != nil {
|
||||
// No permission to read the file; that's not a problem: just skip.
|
||||
// But do add it to w.fileExists to prevent it from being picked up
|
||||
@@ -606,7 +670,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error {
|
||||
case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM):
|
||||
cleanPath = filepath.Clean(path)
|
||||
default:
|
||||
return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err)
|
||||
return fmt.Errorf("%q: %w", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -622,26 +686,37 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error {
|
||||
//
|
||||
// This functionality is to have the BSD watcher match the inotify, which sends
|
||||
// a create event for files created in a watched directory.
|
||||
func (w *Watcher) sendDirectoryChangeEvents(dir string) {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dir)
|
||||
func (w *Watcher) sendDirectoryChangeEvents(dir string) error {
|
||||
files, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) {
|
||||
return
|
||||
// Directory no longer exists: we can ignore this safely. kqueue will
|
||||
// still give us the correct events.
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
|
||||
}
|
||||
|
||||
// Search for new files
|
||||
for _, fi := range files {
|
||||
err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
|
||||
for _, f := range files {
|
||||
fi, err := f.Info()
|
||||
if err != nil {
|
||||
return
|
||||
return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
|
||||
}
|
||||
|
||||
err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
|
||||
if err != nil {
|
||||
// Don't need to send an error if this file isn't readable.
|
||||
if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendFileCreatedEvent sends a create event if the file isn't already being tracked.
|
||||
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
|
||||
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) {
|
||||
w.mu.Lock()
|
||||
_, doesExist := w.fileExists[filePath]
|
||||
w.mu.Unlock()
|
||||
@@ -652,7 +727,7 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf
|
||||
}
|
||||
|
||||
// like watchDirectoryFiles (but without doing another ReadDir)
|
||||
filePath, err = w.internalWatch(filePath, fileInfo)
|
||||
filePath, err = w.internalWatch(filePath, fi)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -664,10 +739,10 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
|
||||
if fileInfo.IsDir() {
|
||||
// mimic Linux providing delete events for subdirectories
|
||||
// but preserve the flags used if currently watching subdirectory
|
||||
func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) {
|
||||
if fi.IsDir() {
|
||||
// mimic Linux providing delete events for subdirectories, but preserve
|
||||
// the flags used if currently watching subdirectory
|
||||
w.mu.Lock()
|
||||
flags := w.dirFlags[name]
|
||||
w.mu.Unlock()
|
||||
|
||||
203 vendor/github.com/fsnotify/fsnotify/backend_other.go generated vendored
@@ -1,39 +1,169 @@
|
||||
//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows
|
||||
// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
|
||||
//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows)
|
||||
// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
|
||||
|
||||
// Note: the documentation on the Watcher type and methods is generated from
|
||||
// mkdoc.zsh
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
)
|
||||
import "errors"
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct{}
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
//
|
||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||
// value).
|
||||
//
|
||||
// # Linux notes
|
||||
//
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||
// create is an "instance", and every path you add is a "watch".
|
||||
//
|
||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||
// /proc/sys/fs/inotify/max_user_instances
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
//
|
||||
// # kqueue notes (macOS, BSD)
|
||||
//
|
||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||
// so if you're watching a directory with five files then that's six file
|
||||
// descriptors. You will run into your system's "max open files" limit faster on
|
||||
// these platforms.
|
||||
//
|
||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # Windows notes
|
||||
//
|
||||
// Paths can be added as "C:\path\to\dir", but forward slashes
|
||||
// ("C:/path/to/dir") will also work.
|
||||
//
|
||||
// When a watched directory is removed it will always send an event for the
|
||||
// directory itself, but may not send events for all files in that directory.
|
||||
// Sometimes it will send events for all files, sometimes it will send no
|
||||
// events, and often only for some files.
|
||||
//
|
||||
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
|
||||
// value that is guaranteed to work with SMB filesystems. If you have many
|
||||
// events in quick succession this may not be enough, and you will have to use
|
||||
// [WithBufferSize] to increase the value.
|
||||
type Watcher struct {
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
// fsnotify can send the following events; a "path" here can refer to a
|
||||
// file, directory, symbolic link, or special file like a FIFO.
|
||||
//
|
||||
// fsnotify.Create A new path was created; this may be followed by one
|
||||
// or more Write events if data also gets written to a
|
||||
// file.
|
||||
//
|
||||
// fsnotify.Remove A path was removed.
|
||||
//
|
||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||
// old path as Event.Name, and a Create event will be
|
||||
// sent with the new name. Renames are only sent for
|
||||
// paths that are currently watched; e.g. moving an
|
||||
// unmonitored file into a monitored directory will
|
||||
// show up as just a Create. Similarly, renaming a file
|
||||
// to outside a monitored directory will show up as
|
||||
// only a Rename.
|
||||
//
|
||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||
// also trigger a Write. A single "write action"
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, and you may
|
||||
// want to wait until you've stopped receiving them
|
||||
// (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// Some systems may send a Write event for directories
|
||||
// when the directory content changes.
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// when a file is truncated. On Windows it's never
|
||||
// sent.
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
//
|
||||
// ErrEventOverflow is used to indicate there are too many events:
|
||||
//
|
||||
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
|
||||
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
|
||||
// - kqueue, fen: Not used.
|
||||
Errors chan error
|
||||
}
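The Events documentation above suggests waiting until Write events stop arriving (the dedup example in cmd/fsnotify). A minimal debounce sketch under that advice; the 100ms quiet window is an arbitrary choice, the helper is not part of fsnotify, and it assumes imports of time and github.com/fsnotify/fsnotify:

// dedupWrites coalesces bursts of Write events per file, calling handle once
// after the file has been quiet for 100ms.
func dedupWrites(w *fsnotify.Watcher, handle func(string)) {
	timers := make(map[string]*time.Timer)
	for ev := range w.Events {
		if !ev.Has(fsnotify.Write) {
			continue
		}
		if t, ok := timers[ev.Name]; ok {
			t.Reset(100 * time.Millisecond) // still being written; push the deadline back
			continue
		}
		name := ev.Name
		timers[name] = time.AfterFunc(100*time.Millisecond, func() { handle(name) })
	}
}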
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
|
||||
return nil, errors.New("fsnotify not supported on the current platform")
|
||||
}
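Callers that may run on a platform served by this stub backend usually treat the constructor error as "watching unavailable" rather than fatal; a sketch, assuming the log package:

w, err := fsnotify.NewWatcher()
if err != nil {
	// Expected on platforms covered by this stub backend.
	log.Printf("file watching disabled, falling back to polling: %v", err)
	return
}
defer w.Close()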
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
return nil
|
||||
}
|
||||
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
|
||||
// channel.
|
||||
//
|
||||
// The main use case for this is situations with a very large number of events
|
||||
// where the kernel buffer size can't be increased (e.g. due to lack of
|
||||
// permissions). An unbuffered Watcher will perform better for almost all use
|
||||
// cases, and whenever possible you will be better off increasing the kernel
|
||||
// buffers instead of adding a large userspace buffer.
|
||||
func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() }
|
||||
|
||||
// Close removes all watches and closes the Events channel.
|
||||
func (w *Watcher) Close() error { return nil }
|
||||
|
||||
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
|
||||
// yet removed).
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) WatchList() []string { return nil }
|
||||
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; attempting to watch it more than once will
|
||||
// return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// added. A watch will be automatically removed if the path is deleted.
|
||||
// A path can only be watched once; watching it more than once is a no-op and will
|
||||
// not return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// watched.
|
||||
//
|
||||
// A path will remain watched if it gets renamed to somewhere else on the same
|
||||
// filesystem, but the monitor will get removed if the path gets deleted and
|
||||
// re-created, or if it's moved to a different filesystem.
|
||||
// A watch will be automatically removed if the watched path is deleted or
|
||||
// renamed. The exception is the Windows backend, which doesn't remove the
|
||||
// watcher on renames.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// Returns [ErrClosed] if [Watcher.Close] was called.
|
||||
//
|
||||
// See [Watcher.AddWith] for a version that allows adding options.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
@@ -43,17 +173,26 @@ func (w *Watcher) Close() error {
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many tools update files atomically. Instead of "just" writing
|
||||
// to the file a temporary file will be written to first, and if successful the
|
||||
// temporary file is moved to to destination removing the original, or some
|
||||
// variant thereof. The watcher on the original file is now lost, as it no
|
||||
// longer exists.
|
||||
// recommended as many programs (especially editors) update files atomically: it
|
||||
// will write to a temporary file which is then moved to the destination,
|
||||
// overwriting the original (or some variant thereof). The watcher on the
|
||||
// original file is now lost, as that no longer exists.
|
||||
//
|
||||
// Instead, watch the parent directory and use Event.Name to filter out files
|
||||
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
|
||||
func (w *Watcher) Add(name string) error {
|
||||
return nil
|
||||
}
|
||||
// The upshot of this is that a power failure or crash won't leave a
|
||||
// half-written file.
|
||||
//
|
||||
// Watch the parent directory and use Event.Name to filter out files you're not
|
||||
// interested in. There is an example of this in cmd/fsnotify/file.go.
|
||||
func (w *Watcher) Add(name string) error { return nil }
|
||||
|
||||
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
|
||||
// the defaults described below are used.
|
||||
//
|
||||
// Possible options are:
|
||||
//
|
||||
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
|
||||
// other platforms. The default is 64K (65536 bytes).
|
||||
func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil }
|
||||
|
||||
// Remove stops monitoring the path for changes.
|
||||
//
|
||||
@@ -61,6 +200,6 @@ func (w *Watcher) Add(name string) error {
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
return nil
|
||||
}
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) Remove(name string) error { return nil }
|
||||
|
||||
245 vendor/github.com/fsnotify/fsnotify/backend_windows.go generated vendored
@@ -1,6 +1,13 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
// Windows backend based on ReadDirectoryChangesW()
|
||||
//
|
||||
// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
|
||||
//
|
||||
// Note: the documentation on the Watcher type and methods is generated from
|
||||
// mkdoc.zsh
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
@@ -27,9 +34,9 @@ import (
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
@@ -43,16 +50,16 @@ import (
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
@@ -68,14 +75,20 @@ import (
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # macOS notes
|
||||
// # Windows notes
|
||||
//
|
||||
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
|
||||
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
|
||||
// Settings" until we have a native FSEvents implementation (see [#11]).
|
||||
// Paths can be added as "C:\path\to\dir", but forward slashes
|
||||
// ("C:/path/to/dir") will also work.
|
||||
//
|
||||
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
|
||||
// When a watched directory is removed it will always send an event for the
|
||||
// directory itself, but may not send events for all files in that directory.
|
||||
// Sometimes it will send events for all files, sometimes it will send no
|
||||
// events, and often only for some files.
|
||||
//
|
||||
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
|
||||
// value that is guaranteed to work with SMB filesystems. If you have many
|
||||
// events in quick succession this may not be enough, and you will have to use
|
||||
// [WithBufferSize] to increase the value.
|
||||
type Watcher struct {
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
@@ -102,31 +115,52 @@ type Watcher struct {
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, so you
|
||||
// probably want to wait until you've stopped receiving
|
||||
// them (see the dedup example in cmd/fsnotify).
|
||||
// you may get hundreds of Write events, and you may
|
||||
// want to wait until you've stopped receiving them
|
||||
// (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// Some systems may send a Write event for directories
|
||||
// when the directory content changes.
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// and on kqueue when a file is truncated. On Windows
|
||||
// it's never sent.
|
||||
// when a file is truncated. On Windows it's never
|
||||
// sent.
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
//
|
||||
// ErrEventOverflow is used to indicate there are too many events:
|
||||
//
|
||||
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
|
||||
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
|
||||
// - kqueue, fen: Not used.
|
||||
Errors chan error
|
||||
|
||||
port windows.Handle // Handle to completion port
|
||||
input chan *input // Inputs to the reader are sent on this channel
|
||||
quit chan chan<- error
|
||||
|
||||
mu sync.Mutex // Protects access to watches, isClosed
|
||||
watches watchMap // Map of watches (key: i-number)
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
mu sync.Mutex // Protects access to watches, closed
|
||||
watches watchMap // Map of watches (key: i-number)
|
||||
closed bool // Set to true when Close() is first called
|
||||
}
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
return NewBufferedWatcher(50)
|
||||
}
|
||||
|
||||
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
|
||||
// channel.
|
||||
//
|
||||
// The main use case for this is situations with a very large number of events
|
||||
// where the kernel buffer size can't be increased (e.g. due to lack of
|
||||
// permissions). An unbuffered Watcher will perform better for almost all use
|
||||
// cases, and whenever possible you will be better off increasing the kernel
|
||||
// buffers instead of adding a large userspace buffer.
|
||||
func NewBufferedWatcher(sz uint) (*Watcher, error) {
|
||||
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
|
||||
if err != nil {
|
||||
return nil, os.NewSyscallError("CreateIoCompletionPort", err)
|
||||
@@ -135,7 +169,7 @@ func NewWatcher() (*Watcher, error) {
|
||||
port: port,
|
||||
watches: make(watchMap),
|
||||
input: make(chan *input, 1),
|
||||
Events: make(chan Event, 50),
|
||||
Events: make(chan Event, sz),
|
||||
Errors: make(chan error),
|
||||
quit: make(chan chan<- error, 1),
|
||||
}
|
||||
@@ -143,6 +177,12 @@ func NewWatcher() (*Watcher, error) {
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (w *Watcher) isClosed() bool {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
return w.closed
|
||||
}
|
||||
|
||||
func (w *Watcher) sendEvent(name string, mask uint64) bool {
|
||||
if mask == 0 {
|
||||
return false
|
||||
@@ -167,14 +207,14 @@ func (w *Watcher) sendError(err error) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
// Close removes all watches and closes the Events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
|
||||
w.mu.Lock()
|
||||
w.closed = true
|
||||
w.mu.Unlock()
|
||||
|
||||
// Send "quit" message to the reader goroutine
|
||||
@@ -188,17 +228,21 @@ func (w *Watcher) Close() error {
|
||||
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; attempting to watch it more than once will
|
||||
// return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// added. A watch will be automatically removed if the path is deleted.
|
||||
// A path can only be watched once; watching it more than once is a no-op and will
|
||||
// not return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// watched.
|
||||
//
|
||||
// A path will remain watched if it gets renamed to somewhere else on the same
|
||||
// filesystem, but the monitor will get removed if the path gets deleted and
|
||||
// re-created, or if it's moved to a different filesystem.
|
||||
// A watch will be automatically removed if the watched path is deleted or
|
||||
// renamed. The exception is the Windows backend, which doesn't remove the
|
||||
// watcher on renames.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// Returns [ErrClosed] if [Watcher.Close] was called.
|
||||
//
|
||||
// See [Watcher.AddWith] for a version that allows adding options.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
@@ -208,27 +252,41 @@ func (w *Watcher) Close() error {
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many tools update files atomically. Instead of "just" writing
|
||||
// to the file a temporary file will be written to first, and if successful the
|
||||
// temporary file is moved to to destination removing the original, or some
|
||||
// variant thereof. The watcher on the original file is now lost, as it no
|
||||
// longer exists.
|
||||
// recommended as many programs (especially editors) update files atomically: it
|
||||
// will write to a temporary file which is then moved to the destination,
|
||||
// overwriting the original (or some variant thereof). The watcher on the
|
||||
// original file is now lost, as that no longer exists.
|
||||
//
|
||||
// Instead, watch the parent directory and use Event.Name to filter out files
|
||||
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
|
||||
func (w *Watcher) Add(name string) error {
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return errors.New("watcher already closed")
|
||||
// The upshot of this is that a power failure or crash won't leave a
|
||||
// half-written file.
|
||||
//
|
||||
// Watch the parent directory and use Event.Name to filter out files you're not
|
||||
// interested in. There is an example of this in cmd/fsnotify/file.go.
|
||||
func (w *Watcher) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
|
||||
// the defaults described below are used.
|
||||
//
|
||||
// Possible options are:
|
||||
//
|
||||
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
|
||||
// other platforms. The default is 64K (65536 bytes).
|
||||
func (w *Watcher) AddWith(name string, opts ...addOpt) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
|
||||
with := getOptions(opts...)
|
||||
if with.bufsize < 4096 {
|
||||
return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes")
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
in := &input{
|
||||
op: opAddWatch,
|
||||
path: filepath.Clean(name),
|
||||
flags: sysFSALLEVENTS,
|
||||
reply: make(chan error),
|
||||
op: opAddWatch,
|
||||
path: filepath.Clean(name),
|
||||
flags: sysFSALLEVENTS,
|
||||
reply: make(chan error),
|
||||
bufsize: with.bufsize,
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
@@ -243,7 +301,13 @@ func (w *Watcher) Add(name string) error {
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
in := &input{
|
||||
op: opRemoveWatch,
|
||||
path: filepath.Clean(name),
|
||||
@@ -256,8 +320,15 @@ func (w *Watcher) Remove(name string) error {
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
// WatchList returns all paths added with [Add] (and are not yet removed).
|
||||
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
|
||||
// yet removed).
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
func (w *Watcher) WatchList() []string {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
@@ -279,7 +350,6 @@ func (w *Watcher) WatchList() []string {
|
||||
// This should all be removed at some point, and just use windows.FILE_NOTIFY_*
|
||||
const (
|
||||
sysFSALLEVENTS = 0xfff
|
||||
sysFSATTRIB = 0x4
|
||||
sysFSCREATE = 0x100
|
||||
sysFSDELETE = 0x200
|
||||
sysFSDELETESELF = 0x400
|
||||
@@ -305,9 +375,6 @@ func (w *Watcher) newEvent(name string, mask uint32) Event {
|
||||
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&sysFSATTRIB == sysFSATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
@@ -321,10 +388,11 @@ const (
|
||||
)
|
||||
|
||||
type input struct {
|
||||
op int
|
||||
path string
|
||||
flags uint32
|
||||
reply chan error
|
||||
op int
|
||||
path string
|
||||
flags uint32
|
||||
bufsize int
|
||||
reply chan error
|
||||
}
|
||||
|
||||
type inode struct {
|
||||
@@ -334,13 +402,14 @@ type inode struct {
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
ov windows.Overlapped
|
||||
ino *inode // i-number
|
||||
path string // Directory path
|
||||
mask uint64 // Directory itself is being watched with these notify flags
|
||||
names map[string]uint64 // Map of names being watched and their notify flags
|
||||
rename string // Remembers the old name while renaming a file
|
||||
buf [65536]byte // 64K buffer
|
||||
ov windows.Overlapped
|
||||
ino *inode // i-number
|
||||
recurse bool // Recursive watch?
|
||||
path string // Directory path
|
||||
mask uint64 // Directory itself is being watched with these notify flags
|
||||
names map[string]uint64 // Map of names being watched and their notify flags
|
||||
rename string // Remembers the old name while renaming a file
|
||||
buf []byte // buffer, allocated later
|
||||
}
|
||||
|
||||
type (
|
||||
@@ -413,7 +482,10 @@ func (m watchMap) set(ino *inode, watch *watch) {
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
||||
func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error {
|
||||
//pathname, recurse := recursivePath(pathname)
|
||||
recurse := false
|
||||
|
||||
dir, err := w.getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -433,9 +505,11 @@ func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
||||
return os.NewSyscallError("CreateIoCompletionPort", err)
|
||||
}
|
||||
watchEntry = &watch{
|
||||
ino: ino,
|
||||
path: dir,
|
||||
names: make(map[string]uint64),
|
||||
ino: ino,
|
||||
path: dir,
|
||||
names: make(map[string]uint64),
|
||||
recurse: recurse,
|
||||
buf: make([]byte, bufsize),
|
||||
}
|
||||
w.mu.Lock()
|
||||
w.watches.set(ino, watchEntry)
|
||||
@@ -465,6 +539,8 @@ func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) remWatch(pathname string) error {
|
||||
pathname, recurse := recursivePath(pathname)
|
||||
|
||||
dir, err := w.getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -478,6 +554,10 @@ func (w *Watcher) remWatch(pathname string) error {
|
||||
watch := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
|
||||
if recurse && !watch.recurse {
|
||||
return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname)
|
||||
}
|
||||
|
||||
err = windows.CloseHandle(ino.handle)
|
||||
if err != nil {
|
||||
w.sendError(os.NewSyscallError("CloseHandle", err))
|
||||
@@ -535,8 +615,11 @@ func (w *Watcher) startRead(watch *watch) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
|
||||
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
|
||||
// We need to pass the array, rather than the slice.
|
||||
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf))
|
||||
rdErr := windows.ReadDirectoryChanges(watch.ino.handle,
|
||||
(*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len),
|
||||
watch.recurse, mask, nil, &watch.ov, 0)
|
||||
if rdErr != nil {
|
||||
err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
|
||||
if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
|
||||
@@ -563,9 +646,8 @@ func (w *Watcher) readEvents() {
|
||||
runtime.LockOSThread()
|
||||
|
||||
for {
|
||||
// This error is handled after the watch == nil check below.
|
||||
qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
|
||||
// This error is handled after the watch == nil check below. NOTE: this
|
||||
// seems odd, note sure if it's correct.
|
||||
|
||||
watch := (*watch)(unsafe.Pointer(ov))
|
||||
if watch == nil {
|
||||
@@ -595,7 +677,7 @@ func (w *Watcher) readEvents() {
|
||||
case in := <-w.input:
|
||||
switch in.op {
|
||||
case opAddWatch:
|
||||
in.reply <- w.addWatch(in.path, uint64(in.flags))
|
||||
in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize)
|
||||
case opRemoveWatch:
|
||||
in.reply <- w.remWatch(in.path)
|
||||
}
|
||||
@@ -605,6 +687,8 @@ func (w *Watcher) readEvents() {
|
||||
}
|
||||
|
||||
switch qErr {
|
||||
case nil:
|
||||
// No error
|
||||
case windows.ERROR_MORE_DATA:
|
||||
if watch == nil {
|
||||
w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
|
||||
@@ -626,13 +710,12 @@ func (w *Watcher) readEvents() {
|
||||
default:
|
||||
w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr))
|
||||
continue
|
||||
case nil:
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
for {
|
||||
if n == 0 {
|
||||
w.sendError(errors.New("short read in readEvents()"))
|
||||
w.sendError(ErrEventOverflow)
|
||||
break
|
||||
}
|
||||
|
||||
@@ -703,8 +786,9 @@ func (w *Watcher) readEvents() {
|
||||
|
||||
// Error!
|
||||
if offset >= n {
|
||||
//lint:ignore ST1005 Windows should be capitalized
|
||||
w.sendError(errors.New(
|
||||
"Windows system assumed buffer larger than it is, events have likely been missed."))
|
||||
"Windows system assumed buffer larger than it is, events have likely been missed"))
|
||||
break
|
||||
}
|
||||
}
|
||||
@@ -720,9 +804,6 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
|
||||
if mask&sysFSMODIFY != 0 {
|
||||
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
|
||||
}
|
||||
if mask&sysFSATTRIB != 0 {
|
||||
m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES
|
||||
}
|
||||
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
|
||||
m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME
|
||||
}
|
||||
|
||||

vendor/github.com/fsnotify/fsnotify/fsnotify.go (generated, vendored): 91 lines changed
@@ -1,13 +1,18 @@
//go:build !plan9
// +build !plan9

// Package fsnotify provides a cross-platform interface for file system
// notifications.
//
// Currently supported systems:
//
// Linux 2.6.32+ via inotify
// BSD, macOS via kqueue
// Windows via ReadDirectoryChangesW
// illumos via FEN
package fsnotify

import (
"errors"
"fmt"
"path/filepath"
"strings"
)

@@ -33,34 +38,52 @@ type Op uint32
// The operations fsnotify can trigger; see the documentation on [Watcher] for a
// full description, and check them with [Event.Has].
const (
// A new pathname was created.
Create Op = 1 << iota

// The pathname was written to; this does *not* mean the write has finished,
// and a write can be followed by more writes.
Write

// The path was removed; any watches on it will be removed. Some "remove"
// operations may trigger a Rename if the file is actually moved (for
// example "remove to trash" is often a rename).
Remove

// The path was renamed to something else; any watched on it will be
// removed.
Rename

// File attributes were changed.
//
// It's generally not recommended to take action on this event, as it may
// get triggered very frequently by some software. For example, Spotlight
// indexing on macOS, anti-virus software, backup software, etc.
Chmod
)

// Common errors that can be reported by a watcher
// Common errors that can be reported.
var (
ErrNonExistentWatch = errors.New("can't remove non-existent watcher")
ErrEventOverflow = errors.New("fsnotify queue overflow")
ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch")
ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")
ErrClosed = errors.New("fsnotify: watcher already closed")
)

func (op Op) String() string {
func (o Op) String() string {
var b strings.Builder
if op.Has(Create) {
if o.Has(Create) {
b.WriteString("|CREATE")
}
if op.Has(Remove) {
if o.Has(Remove) {
b.WriteString("|REMOVE")
}
if op.Has(Write) {
if o.Has(Write) {
b.WriteString("|WRITE")
}
if op.Has(Rename) {
if o.Has(Rename) {
b.WriteString("|RENAME")
}
if op.Has(Chmod) {
if o.Has(Chmod) {
b.WriteString("|CHMOD")
}
if b.Len() == 0 {
@@ -70,7 +93,7 @@ func (op Op) String() string {
}

// Has reports if this operation has the given operation.
func (o Op) Has(h Op) bool { return o&h == h }
func (o Op) Has(h Op) bool { return o&h != 0 }

// Has reports if this event has the given operation.
func (e Event) Has(op Op) bool { return e.Op.Has(op) }
@@ -79,3 +102,45 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) }
func (e Event) String() string {
return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
}

type (
addOpt func(opt *withOpts)
withOpts struct {
bufsize int
}
)

var defaultOpts = withOpts{
bufsize: 65536, // 64K
}

func getOptions(opts ...addOpt) withOpts {
with := defaultOpts
for _, o := range opts {
o(&with)
}
return with
}

// WithBufferSize sets the [ReadDirectoryChangesW] buffer size.
//
// This only has effect on Windows systems, and is a no-op for other backends.
//
// The default value is 64K (65536 bytes) which is the highest value that works
// on all filesystems and should be enough for most applications, but if you
// have a large burst of events it may not be enough. You can increase it if
// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]).
//
// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
func WithBufferSize(bytes int) addOpt {
return func(opt *withOpts) { opt.bufsize = bytes }
}
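
The AddWith/WithBufferSize plumbing above is only reachable through the public Watcher API. Below is a minimal, hedged usage sketch against the fsnotify v1.7 API this diff vendors; the watched path and the 256 KiB buffer are arbitrary illustration values, not anything moby requires.

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// WithBufferSize only has an effect on Windows, where it sizes the
	// ReadDirectoryChangesW buffer; other backends accept it as a no-op.
	if err := w.AddWith(`C:\projects\build`, fsnotify.WithBufferSize(256*1024)); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return
			}
			// Op.Has (changed above to o&h != 0) reports whether any of the
			// given operation bits is set on the event.
			if ev.Has(fsnotify.Write) || ev.Has(fsnotify.Create) {
				log.Println("changed:", ev.Name)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}
```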

// Check if this path is recursive (ends with "/..." or "\..."), and return the
// path with the /... stripped.
func recursivePath(path string) (string, bool) {
if filepath.Base(path) == "..." {
return filepath.Dir(path), true
}
return path, false
}

vendor/github.com/fsnotify/fsnotify/mkdoc.zsh (generated, vendored): 123 lines changed
@@ -2,8 +2,8 @@
|
||||
[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1
|
||||
setopt err_exit no_unset pipefail extended_glob
|
||||
|
||||
# Simple script to update the godoc comments on all watchers. Probably took me
|
||||
# more time to write this than doing it manually, but ah well 🙃
|
||||
# Simple script to update the godoc comments on all watchers so you don't need
|
||||
# to update the same comment 5 times.
|
||||
|
||||
watcher=$(<<EOF
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
@@ -16,9 +16,9 @@ watcher=$(<<EOF
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
@@ -32,16 +32,16 @@ watcher=$(<<EOF
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
@@ -57,14 +57,20 @@ watcher=$(<<EOF
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # macOS notes
|
||||
// # Windows notes
|
||||
//
|
||||
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
|
||||
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
|
||||
// Settings" until we have a native FSEvents implementation (see [#11]).
|
||||
// Paths can be added as "C:\\path\\to\\dir", but forward slashes
|
||||
// ("C:/path/to/dir") will also work.
|
||||
//
|
||||
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
|
||||
// When a watched directory is removed it will always send an event for the
|
||||
// directory itself, but may not send events for all files in that directory.
|
||||
// Sometimes it will send events for all times, sometimes it will send no
|
||||
// events, and often only for some files.
|
||||
//
|
||||
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
|
||||
// value that is guaranteed to work with SMB filesystems. If you have many
|
||||
// events in quick succession this may not be enough, and you will have to use
|
||||
// [WithBufferSize] to increase the value.
|
||||
EOF
|
||||
)
|
||||
|
||||
@@ -73,20 +79,36 @@ new=$(<<EOF
|
||||
EOF
|
||||
)
|
||||
|
||||
newbuffered=$(<<EOF
|
||||
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
|
||||
// channel.
|
||||
//
|
||||
// The main use case for this is situations with a very large number of events
|
||||
// where the kernel buffer size can't be increased (e.g. due to lack of
|
||||
// permissions). An unbuffered Watcher will perform better for almost all use
|
||||
// cases, and whenever possible you will be better off increasing the kernel
|
||||
// buffers instead of adding a large userspace buffer.
|
||||
EOF
|
||||
)
|
||||
|
||||
add=$(<<EOF
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; attempting to watch it more than once will
|
||||
// return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// added. A watch will be automatically removed if the path is deleted.
|
||||
// A path can only be watched once; watching it more than once is a no-op and will
|
||||
// not return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// watched.
|
||||
//
|
||||
// A path will remain watched if it gets renamed to somewhere else on the same
|
||||
// filesystem, but the monitor will get removed if the path gets deleted and
|
||||
// re-created, or if it's moved to a different filesystem.
|
||||
// A watch will be automatically removed if the watched path is deleted or
|
||||
// renamed. The exception is the Windows backend, which doesn't remove the
|
||||
// watcher on renames.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// Returns [ErrClosed] if [Watcher.Close] was called.
|
||||
//
|
||||
// See [Watcher.AddWith] for a version that allows adding options.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
@@ -96,14 +118,27 @@ add=$(<<EOF
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many tools update files atomically. Instead of "just" writing
|
||||
// to the file a temporary file will be written to first, and if successful the
|
||||
// temporary file is moved to to destination removing the original, or some
|
||||
// variant thereof. The watcher on the original file is now lost, as it no
|
||||
// longer exists.
|
||||
// recommended as many programs (especially editors) update files atomically: it
|
||||
// will write to a temporary file which is then moved to to destination,
|
||||
// overwriting the original (or some variant thereof). The watcher on the
|
||||
// original file is now lost, as that no longer exists.
|
||||
//
|
||||
// Instead, watch the parent directory and use Event.Name to filter out files
|
||||
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
|
||||
// The upshot of this is that a power failure or crash won't leave a
|
||||
// half-written file.
|
||||
//
|
||||
// Watch the parent directory and use Event.Name to filter out files you're not
|
||||
// interested in. There is an example of this in cmd/fsnotify/file.go.
|
||||
EOF
|
||||
)
|
||||
|
||||
addwith=$(<<EOF
|
||||
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
|
||||
// the defaults described below are used.
|
||||
//
|
||||
// Possible options are:
|
||||
//
|
||||
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
|
||||
// other platforms. The default is 64K (65536 bytes).
|
||||
EOF
|
||||
)
|
||||
|
||||
@@ -114,16 +149,21 @@ remove=$(<<EOF
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
EOF
|
||||
)
|
||||
|
||||
close=$(<<EOF
|
||||
// Close removes all watches and closes the events channel.
|
||||
// Close removes all watches and closes the Events channel.
|
||||
EOF
|
||||
)
|
||||
|
||||
watchlist=$(<<EOF
|
||||
// WatchList returns all paths added with [Add] (and are not yet removed).
|
||||
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
|
||||
// yet removed).
|
||||
//
|
||||
// Returns nil if [Watcher.Close] was called.
|
||||
EOF
|
||||
)
|
||||
|
||||
@@ -153,20 +193,29 @@ events=$(<<EOF
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, so you
|
||||
// probably want to wait until you've stopped receiving
|
||||
// them (see the dedup example in cmd/fsnotify).
|
||||
// you may get hundreds of Write events, and you may
|
||||
// want to wait until you've stopped receiving them
|
||||
// (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// Some systems may send Write event for directories
|
||||
// when the directory content changes.
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// and on kqueue when a file is truncated. On Windows
|
||||
// it's never sent.
|
||||
// when a file is truncated. On Windows it's never
|
||||
// sent.
|
||||
EOF
|
||||
)
|
||||
|
||||
errors=$(<<EOF
|
||||
// Errors sends any errors.
|
||||
//
|
||||
// ErrEventOverflow is used to indicate there are too many events:
|
||||
//
|
||||
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
|
||||
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
|
||||
// - kqueue, fen: Not used.
|
||||
EOF
|
||||
)
|
||||
|
||||
@@ -200,7 +249,9 @@ set-cmt() {
|
||||
|
||||
set-cmt '^type Watcher struct ' $watcher
|
||||
set-cmt '^func NewWatcher(' $new
|
||||
set-cmt '^func NewBufferedWatcher(' $newbuffered
|
||||
set-cmt '^func (w \*Watcher) Add(' $add
|
||||
set-cmt '^func (w \*Watcher) AddWith(' $addwith
|
||||
set-cmt '^func (w \*Watcher) Remove(' $remove
|
||||
set-cmt '^func (w \*Watcher) Close(' $close
|
||||
set-cmt '^func (w \*Watcher) WatchList(' $watchlist
|
||||
|
||||

vendor/github.com/stretchr/testify/assert/assertion_compare.go (generated, vendored): 35 lines changed
@@ -7,10 +7,13 @@ import (
"time"
)

type CompareType int
// Deprecated: CompareType has only ever been for internal use and has accidentally been published since v1.6.0. Do not use it.
type CompareType = compareResult

type compareResult int

const (
compareLess CompareType = iota - 1
compareLess compareResult = iota - 1
compareEqual
compareGreater
)
@@ -39,7 +42,7 @@ var (
bytesType = reflect.TypeOf([]byte{})
)

func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
func compare(obj1, obj2 interface{}, kind reflect.Kind) (compareResult, bool) {
obj1Value := reflect.ValueOf(obj1)
obj2Value := reflect.ValueOf(obj2)

@@ -325,7 +328,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time)
}

return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
if timeObj1.Before(timeObj2) {
return compareLess, true
}
if timeObj1.Equal(timeObj2) {
return compareEqual, true
}
return compareGreater, true
}
case reflect.Slice:
{
@@ -345,7 +354,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte)
}

return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
return compareResult(bytes.Compare(bytesObj1, bytesObj2)), true
}
case reflect.Uintptr:
{
@@ -381,7 +390,7 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface
if h, ok := t.(tHelper); ok {
h.Helper()
}
return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}

// GreaterOrEqual asserts that the first element is greater than or equal to the second
@@ -394,7 +403,7 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in
if h, ok := t.(tHelper); ok {
h.Helper()
}
return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}

// Less asserts that the first element is less than the second
@@ -406,7 +415,7 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{})
if h, ok := t.(tHelper); ok {
h.Helper()
}
return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}

// LessOrEqual asserts that the first element is less than or equal to the second
@@ -419,7 +428,7 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
if h, ok := t.(tHelper); ok {
h.Helper()
}
return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}

// Positive asserts that the specified element is positive
@@ -431,7 +440,7 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
h.Helper()
}
zero := reflect.Zero(reflect.TypeOf(e))
return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
}

// Negative asserts that the specified element is negative
@@ -443,10 +452,10 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
h.Helper()
}
zero := reflect.Zero(reflect.TypeOf(e))
return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...)
return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...)
}

func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
@@ -469,7 +478,7 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare
return true
}

func containsValue(values []CompareType, value CompareType) bool {
func containsValue(values []compareResult, value compareResult) bool {
for _, v := range values {
if v == value {
return true
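
The compare() hunk above replaces the UnixNano-based ordering of time.Time values with time.Time.Before/Equal, so the ordering assertions compare timestamps directly. A small hedged example of the public assertions that route through this code path; the test name and values are illustrative only.

```go
package example_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestTimestampOrdering(t *testing.T) {
	start := time.Now()
	later := start.Add(2 * time.Second)

	// Both assertions end up in compare(), which now orders time.Time with
	// Before/Equal rather than by comparing UnixNano values.
	assert.Greater(t, later, start)
	assert.LessOrEqual(t, start, later)
}
```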

vendor/github.com/stretchr/testify/assert/assertion_format.go (generated, vendored): 34 lines changed
@@ -104,8 +104,8 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{},
return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}

// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
// EqualValuesf asserts that two objects are equal or convertible to the larger
// type and equal.
//
// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
@@ -186,7 +186,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick
// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") {
// // add assertions as needed; any assertion failure will fail the current tick
// assert.True(c, externalValue, "expected 'externalValue' to be true")
// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
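
The doc-comment fix above corrects the example's argument order: waitFor (10s) comes before tick (1s), matching the EventuallyWithTf signature. A hedged sketch of a typical call follows; the polled atomic flag is invented for illustration.

```go
package example_test

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestBecomesReady(t *testing.T) {
	var ready atomic.Bool
	go func() {
		time.Sleep(100 * time.Millisecond)
		ready.Store(true)
	}()

	// Signature is (t, condition, waitFor, tick, ...): retry the condition
	// every 1s for up to 10s until it stops reporting failures.
	assert.EventuallyWithT(t, func(c *assert.CollectT) {
		assert.True(c, ready.Load(), "expected the flag to become true")
	}, 10*time.Second, 1*time.Second)
}
```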
@@ -568,6 +568,23 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a
return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
}

// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified
// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
// the number of appearances of each of them in both lists should not match.
// This is an inverse of ElementsMatch.
//
// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false
//
// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true
//
// assert.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true
func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
}
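
NotElementsMatchf above is the formatted wrapper for the new NotElementsMatch assertion. A brief hedged example of what passes and what fails; the slices are illustrative.

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNotElementsMatch(t *testing.T) {
	// Passes: the second list is missing one of the duplicated 1s.
	assert.NotElementsMatch(t, []int{1, 1, 2, 3}, []int{1, 2, 3})

	// Passes: the lists differ in a single element.
	assert.NotElementsMatchf(t, []int{1, 2, 3}, []int{1, 2, 4}, "lists should differ (%s)", "example")

	// Would fail: same elements, only the order differs.
	// assert.NotElementsMatch(t, []int{1, 2, 3}, []int{3, 2, 1})
}
```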

// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
@@ -604,7 +621,16 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s
return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}

// NotErrorIsf asserts that at none of the errors in err's chain matches target.
// NotErrorAsf asserts that none of the errors in err's chain matches target,
// but if so, sets target to that error value.
func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
}

// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
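
NotErrorAsf above wraps the new NotErrorAs assertion, the negation of ErrorAs. A hedged sketch follows; the timeoutError type is hypothetical and exists only for this example.

```go
package example_test

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

// timeoutError is a hypothetical error type, present only for this example.
type timeoutError struct{ op string }

func (e *timeoutError) Error() string { return e.op + ": timed out" }

func TestNotErrorAs(t *testing.T) {
	err := fmt.Errorf("fetch failed: %w", errors.New("connection refused"))

	// Passes: nothing in err's chain is a *timeoutError, so target is left unset.
	var target *timeoutError
	assert.NotErrorAs(t, err, &target)
}
```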

vendor/github.com/stretchr/testify/assert/assertion_forward.go (generated, vendored): 68 lines changed
@@ -186,8 +186,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface
|
||||
return EqualExportedValuesf(a.t, expected, actual, msg, args...)
|
||||
}
|
||||
|
||||
// EqualValues asserts that two objects are equal or convertible to the same types
|
||||
// and equal.
|
||||
// EqualValues asserts that two objects are equal or convertible to the larger
|
||||
// type and equal.
|
||||
//
|
||||
// a.EqualValues(uint32(123), int32(123))
|
||||
func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
|
||||
@@ -197,8 +197,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
|
||||
return EqualValues(a.t, expected, actual, msgAndArgs...)
|
||||
}
|
||||
|
||||
// EqualValuesf asserts that two objects are equal or convertible to the same types
|
||||
// and equal.
|
||||
// EqualValuesf asserts that two objects are equal or convertible to the larger
|
||||
// type and equal.
|
||||
//
|
||||
// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
|
||||
func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
|
||||
@@ -336,7 +336,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti
|
||||
// a.EventuallyWithT(func(c *assert.CollectT) {
|
||||
// // add assertions as needed; any assertion failure will fail the current tick
|
||||
// assert.True(c, externalValue, "expected 'externalValue' to be true")
|
||||
// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
|
||||
// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
|
||||
func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
@@ -361,7 +361,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor
|
||||
// a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") {
|
||||
// // add assertions as needed; any assertion failure will fail the current tick
|
||||
// assert.True(c, externalValue, "expected 'externalValue' to be true")
|
||||
// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
|
||||
// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
|
||||
func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
@@ -1128,6 +1128,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin
|
||||
return NotContainsf(a.t, s, contains, msg, args...)
|
||||
}
|
||||
|
||||
// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified
|
||||
// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
|
||||
// the number of appearances of each of them in both lists should not match.
|
||||
// This is an inverse of ElementsMatch.
|
||||
//
|
||||
// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false
|
||||
//
|
||||
// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true
|
||||
//
|
||||
// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true
|
||||
func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotElementsMatch(a.t, listA, listB, msgAndArgs...)
|
||||
}
|
||||
|
||||
// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified
|
||||
// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
|
||||
// the number of appearances of each of them in both lists should not match.
|
||||
// This is an inverse of ElementsMatch.
|
||||
//
|
||||
// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false
|
||||
//
|
||||
// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true
|
||||
//
|
||||
// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true
|
||||
func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotElementsMatchf(a.t, listA, listB, msg, args...)
|
||||
}
|
||||
|
||||
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
|
||||
// a slice or a channel with len == 0.
|
||||
//
|
||||
@@ -1200,7 +1234,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str
|
||||
return NotEqualf(a.t, expected, actual, msg, args...)
|
||||
}
|
||||
|
||||
// NotErrorIs asserts that at none of the errors in err's chain matches target.
|
||||
// NotErrorAs asserts that none of the errors in err's chain matches target,
|
||||
// but if so, sets target to that error value.
|
||||
func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotErrorAs(a.t, err, target, msgAndArgs...)
|
||||
}
|
||||
|
||||
// NotErrorAsf asserts that none of the errors in err's chain matches target,
|
||||
// but if so, sets target to that error value.
|
||||
func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotErrorAsf(a.t, err, target, msg, args...)
|
||||
}
|
||||
|
||||
// NotErrorIs asserts that none of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
@@ -1209,7 +1261,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface
|
||||
return NotErrorIs(a.t, err, target, msgAndArgs...)
|
||||
}
|
||||
|
||||
// NotErrorIsf asserts that at none of the errors in err's chain matches target.
|
||||
// NotErrorIsf asserts that none of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
|
||||

vendor/github.com/stretchr/testify/assert/assertion_order.go (generated, vendored): 10 lines changed
@@ -6,7 +6,7 @@ import (
)

// isOrdered checks that collection contains orderable elements.
func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool {
objKind := reflect.TypeOf(object).Kind()
if objKind != reflect.Slice && objKind != reflect.Array {
return false
@@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT
// assert.IsIncreasing(t, []float{1, 2})
// assert.IsIncreasing(t, []string{"a", "b"})
func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
return isOrdered(t, object, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}

// IsNonIncreasing asserts that the collection is not increasing
@@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo
// assert.IsNonIncreasing(t, []float{2, 1})
// assert.IsNonIncreasing(t, []string{"b", "a"})
func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
return isOrdered(t, object, []compareResult{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}

// IsDecreasing asserts that the collection is decreasing
@@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{})
// assert.IsDecreasing(t, []float{2, 1})
// assert.IsDecreasing(t, []string{"b", "a"})
func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
return isOrdered(t, object, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}

// IsNonDecreasing asserts that the collection is not decreasing
@@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo
// assert.IsNonDecreasing(t, []float{1, 2})
// assert.IsNonDecreasing(t, []string{"a", "b"})
func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
return isOrdered(t, object, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}
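
The hunks above only swap isOrdered's allowed-results slice from the deprecated CompareType alias to the internal compareResult type; the exported ordering assertions behave as before. A short hedged usage reminder, with illustrative values:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestOrderingAssertions(t *testing.T) {
	assert.IsIncreasing(t, []int{1, 2, 3})    // strictly increasing
	assert.IsNonDecreasing(t, []int{1, 1, 2}) // equal neighbours allowed
	assert.IsDecreasing(t, []string{"c", "b", "a"})
}
```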

vendor/github.com/stretchr/testify/assert/assertions.go (generated, vendored): 157 lines changed
@@ -19,7 +19,9 @@ import (
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/pmezard/go-difflib/difflib"
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
// Wrapper around gopkg.in/yaml.v3
|
||||
"github.com/stretchr/testify/assert/yaml"
|
||||
)
|
||||
|
||||
//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
|
||||
@@ -45,6 +47,10 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool
|
||||
// for table driven tests.
|
||||
type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
|
||||
|
||||
// PanicAssertionFunc is a common function prototype when validating a panic value. Can be useful
|
||||
// for table driven tests.
|
||||
type PanicAssertionFunc = func(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool
|
||||
|
||||
// Comparison is a custom function that returns true on success and false on failure
|
||||
type Comparison func() (success bool)
|
||||
|
||||
@@ -496,7 +502,13 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b
|
||||
h.Helper()
|
||||
}
|
||||
|
||||
if !samePointers(expected, actual) {
|
||||
same, ok := samePointers(expected, actual)
|
||||
if !ok {
|
||||
return Fail(t, "Both arguments must be pointers", msgAndArgs...)
|
||||
}
|
||||
|
||||
if !same {
|
||||
// both are pointers but not the same type & pointing to the same address
|
||||
return Fail(t, fmt.Sprintf("Not same: \n"+
|
||||
"expected: %p %#v\n"+
|
||||
"actual : %p %#v", expected, expected, actual, actual), msgAndArgs...)
|
||||
@@ -516,7 +528,13 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
|
||||
h.Helper()
|
||||
}
|
||||
|
||||
if samePointers(expected, actual) {
|
||||
same, ok := samePointers(expected, actual)
|
||||
if !ok {
|
||||
//fails when the arguments are not pointers
|
||||
return !(Fail(t, "Both arguments must be pointers", msgAndArgs...))
|
||||
}
|
||||
|
||||
if same {
|
||||
return Fail(t, fmt.Sprintf(
|
||||
"Expected and actual point to the same object: %p %#v",
|
||||
expected, expected), msgAndArgs...)
|
||||
@@ -524,21 +542,23 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
|
||||
return true
|
||||
}
|
||||
|
||||
// samePointers compares two generic interface objects and returns whether
|
||||
// they point to the same object
|
||||
func samePointers(first, second interface{}) bool {
|
||||
// samePointers checks if two generic interface objects are pointers of the same
|
||||
// type pointing to the same object. It returns two values: same indicating if
|
||||
// they are the same type and point to the same object, and ok indicating that
|
||||
// both inputs are pointers.
|
||||
func samePointers(first, second interface{}) (same bool, ok bool) {
|
||||
firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
|
||||
if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
|
||||
return false
|
||||
return false, false //not both are pointers
|
||||
}
|
||||
|
||||
firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
|
||||
if firstType != secondType {
|
||||
return false
|
||||
return false, true // both are pointers, but of different types
|
||||
}
|
||||
|
||||
// compare pointer addresses
|
||||
return first == second
|
||||
return first == second, true
|
||||
}
|
||||
|
||||
// formatUnequalValues takes two values of arbitrary types and returns string
|
||||
@@ -572,8 +592,8 @@ func truncatingFormat(data interface{}) string {
|
||||
return value
|
||||
}
|
||||
|
||||
// EqualValues asserts that two objects are equal or convertible to the same types
|
||||
// and equal.
|
||||
// EqualValues asserts that two objects are equal or convertible to the larger
|
||||
// type and equal.
|
||||
//
|
||||
// assert.EqualValues(t, uint32(123), int32(123))
|
||||
func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
|
||||
@@ -615,21 +635,6 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs ..
|
||||
return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
|
||||
}
|
||||
|
||||
if aType.Kind() == reflect.Ptr {
|
||||
aType = aType.Elem()
|
||||
}
|
||||
if bType.Kind() == reflect.Ptr {
|
||||
bType = bType.Elem()
|
||||
}
|
||||
|
||||
if aType.Kind() != reflect.Struct {
|
||||
return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...)
|
||||
}
|
||||
|
||||
if bType.Kind() != reflect.Struct {
|
||||
return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...)
|
||||
}
|
||||
|
||||
expected = copyExportedFields(expected)
|
||||
actual = copyExportedFields(actual)
|
||||
|
||||
@@ -1170,6 +1175,39 @@ func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) stri
|
||||
return msg.String()
|
||||
}
|
||||
|
||||
// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified
|
||||
// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
|
||||
// the number of appearances of each of them in both lists should not match.
|
||||
// This is an inverse of ElementsMatch.
|
||||
//
|
||||
// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false
|
||||
//
|
||||
// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true
|
||||
//
|
||||
// assert.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true
|
||||
func NotElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if isEmpty(listA) && isEmpty(listB) {
|
||||
return Fail(t, "listA and listB contain the same elements", msgAndArgs)
|
||||
}
|
||||
|
||||
if !isList(t, listA, msgAndArgs...) {
|
||||
return Fail(t, "listA is not a list type", msgAndArgs...)
|
||||
}
|
||||
if !isList(t, listB, msgAndArgs...) {
|
||||
return Fail(t, "listB is not a list type", msgAndArgs...)
|
||||
}
|
||||
|
||||
extraA, extraB := diffLists(listA, listB)
|
||||
if len(extraA) == 0 && len(extraB) == 0 {
|
||||
return Fail(t, "listA and listB contain the same elements", msgAndArgs)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Condition uses a Comparison to assert a complex condition.
|
||||
func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
@@ -1488,6 +1526,9 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd
|
||||
if err != nil {
|
||||
return Fail(t, err.Error(), msgAndArgs...)
|
||||
}
|
||||
if math.IsNaN(actualEpsilon) {
|
||||
return Fail(t, "relative error is NaN", msgAndArgs...)
|
||||
}
|
||||
if actualEpsilon > epsilon {
|
||||
return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+
|
||||
" < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...)
|
||||
@@ -1611,7 +1652,6 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in
|
||||
|
||||
// matchRegexp return true if a specified regexp matches a string.
|
||||
func matchRegexp(rx interface{}, str interface{}) bool {
|
||||
|
||||
var r *regexp.Regexp
|
||||
if rr, ok := rx.(*regexp.Regexp); ok {
|
||||
r = rr
|
||||
@@ -1619,7 +1659,14 @@ func matchRegexp(rx interface{}, str interface{}) bool {
|
||||
r = regexp.MustCompile(fmt.Sprint(rx))
|
||||
}
|
||||
|
||||
return (r.FindStringIndex(fmt.Sprint(str)) != nil)
|
||||
switch v := str.(type) {
|
||||
case []byte:
|
||||
return r.Match(v)
|
||||
case string:
|
||||
return r.MatchString(v)
|
||||
default:
|
||||
return r.MatchString(fmt.Sprint(v))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1872,7 +1919,7 @@ var spewConfigStringerEnabled = spew.ConfigState{
|
||||
MaxDepth: 10,
|
||||
}
|
||||
|
||||
type tHelper interface {
|
||||
type tHelper = interface {
|
||||
Helper()
|
||||
}
|
||||
|
||||
@@ -1911,6 +1958,9 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
|
||||
|
||||
// CollectT implements the TestingT interface and collects all errors.
|
||||
type CollectT struct {
|
||||
// A slice of errors. Non-nil slice denotes a failure.
|
||||
// If it's non-nil but len(c.errors) == 0, this is also a failure
|
||||
// obtained by direct c.FailNow() call.
|
||||
errors []error
|
||||
}
|
||||
|
||||
@@ -1919,9 +1969,10 @@ func (c *CollectT) Errorf(format string, args ...interface{}) {
|
||||
c.errors = append(c.errors, fmt.Errorf(format, args...))
|
||||
}
|
||||
|
||||
// FailNow panics.
|
||||
func (*CollectT) FailNow() {
|
||||
panic("Assertion failed")
|
||||
// FailNow stops execution by calling runtime.Goexit.
|
||||
func (c *CollectT) FailNow() {
|
||||
c.fail()
|
||||
runtime.Goexit()
|
||||
}
|
||||
|
||||
// Deprecated: That was a method for internal usage that should not have been published. Now just panics.
|
||||
@@ -1934,6 +1985,16 @@ func (*CollectT) Copy(TestingT) {
|
||||
panic("Copy() is deprecated")
|
||||
}
|
||||
|
||||
func (c *CollectT) fail() {
|
||||
if !c.failed() {
|
||||
c.errors = []error{} // Make it non-nil to mark a failure.
|
||||
}
|
||||
}
|
||||
|
||||
func (c *CollectT) failed() bool {
|
||||
return c.errors != nil
|
||||
}
|
||||
|
||||
// EventuallyWithT asserts that given condition will be met in waitFor time,
|
||||
// periodically checking target function each tick. In contrast to Eventually,
|
||||
// it supplies a CollectT to the condition function, so that the condition
|
||||
@@ -1951,14 +2012,14 @@ func (*CollectT) Copy(TestingT) {
|
||||
// assert.EventuallyWithT(t, func(c *assert.CollectT) {
|
||||
// // add assertions as needed; any assertion failure will fail the current tick
|
||||
// assert.True(c, externalValue, "expected 'externalValue' to be true")
|
||||
// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
|
||||
// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
|
||||
func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
|
||||
var lastFinishedTickErrs []error
|
||||
ch := make(chan []error, 1)
|
||||
ch := make(chan *CollectT, 1)
|
||||
|
||||
timer := time.NewTimer(waitFor)
|
||||
defer timer.Stop()
|
||||
@@ -1978,16 +2039,16 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time
|
||||
go func() {
|
||||
collect := new(CollectT)
|
||||
defer func() {
|
||||
ch <- collect.errors
|
||||
ch <- collect
|
||||
}()
|
||||
condition(collect)
|
||||
}()
|
||||
case errs := <-ch:
|
||||
if len(errs) == 0 {
|
||||
case collect := <-ch:
|
||||
if !collect.failed() {
|
||||
return true
|
||||
}
|
||||
// Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached.
|
||||
lastFinishedTickErrs = errs
|
||||
lastFinishedTickErrs = collect.errors
|
||||
tick = ticker.C
|
||||
}
|
||||
}
|
||||
@@ -2049,7 +2110,7 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
|
||||
), msgAndArgs...)
|
||||
}
|
||||
|
||||
// NotErrorIs asserts that at none of the errors in err's chain matches target.
|
||||
// NotErrorIs asserts that none of the errors in err's chain matches target.
|
||||
// This is a wrapper for errors.Is.
|
||||
func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
@@ -2090,6 +2151,24 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{
|
||||
), msgAndArgs...)
|
||||
}
|
||||
|
||||
// NotErrorAs asserts that none of the errors in err's chain matches target,
|
||||
// but if so, sets target to that error value.
|
||||
func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if !errors.As(err, target) {
|
||||
return true
|
||||
}
|
||||
|
||||
chain := buildErrorChainString(err)
|
||||
|
||||
return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
|
||||
"found: %q\n"+
|
||||
"in chain: %s", target, chain,
|
||||
), msgAndArgs...)
|
||||
}
|
||||
|
||||
func buildErrorChainString(err error) string {
|
||||
if err == nil {
|
||||
return ""
|
||||
|
||||

vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go (generated, vendored, new file): 25 lines
@@ -0,0 +1,25 @@
//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default
// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default

// Package yaml is an implementation of YAML functions that calls a pluggable implementation.
//
// This implementation is selected with the testify_yaml_custom build tag.
//
// go test -tags testify_yaml_custom
//
// This implementation can be used at build time to replace the default implementation
// to avoid linking with [gopkg.in/yaml.v3].
//
// In your test package:
//
// import assertYaml "github.com/stretchr/testify/assert/yaml"
//
// func init() {
// assertYaml.Unmarshal = func (in []byte, out interface{}) error {
// // ...
// return nil
// }
// }
package yaml

var Unmarshal func(in []byte, out interface{}) error
37  vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go  generated vendored Normal file
@@ -0,0 +1,37 @@
//go:build !testify_yaml_fail && !testify_yaml_custom
// +build !testify_yaml_fail,!testify_yaml_custom

// Package yaml is just an indirection to handle YAML deserialization.
//
// This package is just an indirection that allows the builder to override the
// indirection with an alternative implementation of this package that uses
// another implementation of YAML deserialization. This allows to either not
// use YAML deserialization at all, or to use another implementation than
// [gopkg.in/yaml.v3] (for example for license compatibility reasons, see [PR #1120]).
//
// Alternative implementations are selected using build tags:
//
//   - testify_yaml_fail: [Unmarshal] always fails with an error
//   - testify_yaml_custom: [Unmarshal] is a variable. Caller must initialize it
//     before calling any of [github.com/stretchr/testify/assert.YAMLEq] or
//     [github.com/stretchr/testify/assert.YAMLEqf].
//
// Usage:
//
//	go test -tags testify_yaml_fail
//
// You can check with "go list" which implementation is linked:
//
//	go list -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
//	go list -tags testify_yaml_fail -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
//	go list -tags testify_yaml_custom -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
//
// [PR #1120]: https://github.com/stretchr/testify/pull/1120
package yaml

import goyaml "gopkg.in/yaml.v3"

// Unmarshal is just a wrapper of [gopkg.in/yaml.v3.Unmarshal].
func Unmarshal(in []byte, out interface{}) error {
	return goyaml.Unmarshal(in, out)
}
18  vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go  generated vendored Normal file
@@ -0,0 +1,18 @@
//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default
// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default

// Package yaml is an implementation of YAML functions that always fail.
//
// This implementation can be used at build time to replace the default implementation
// to avoid linking with [gopkg.in/yaml.v3]:
//
//	go test -tags testify_yaml_fail
package yaml

import "errors"

var errNotImplemented = errors.New("YAML functions are not available (see https://pkg.go.dev/github.com/stretchr/testify/assert/yaml)")

func Unmarshal([]byte, interface{}) error {
	return errNotImplemented
}
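Not part of the diff: a sketch of wiring the testify_yaml_custom hook described above. Any decoder with a compatible Unmarshal works; sigs.k8s.io/yaml is used here purely as an example of an implementation other than gopkg.in/yaml.v3.

	//go:build testify_yaml_custom

	package example_test

	import (
		assertYaml "github.com/stretchr/testify/assert/yaml"
		sigsyaml "sigs.k8s.io/yaml" // example replacement decoder; any Unmarshal-compatible package works
	)

	func init() {
		// Without this hook, assert.YAMLEq would call a nil function under -tags testify_yaml_custom.
		assertYaml.Unmarshal = func(in []byte, out interface{}) error {
			return sigsyaml.Unmarshal(in, out)
		}
	}

Run the suite with: go test -tags testify_yaml_custom ./...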
432  vendor/github.com/stretchr/testify/require/require.go  generated vendored
File diff suppressed because it is too large
2  vendor/github.com/stretchr/testify/require/require.go.tmpl  generated vendored
@@ -1,4 +1,4 @@
-{{.Comment}}
+{{ replace .Comment "assert." "require."}}
func {{.DocInfo.Name}}(t TestingT, {{.Params}}) {
	if h, ok := t.(tHelper); ok { h.Helper() }
	if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return }
68  vendor/github.com/stretchr/testify/require/require_forward.go  generated vendored
@@ -187,8 +187,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface
	EqualExportedValuesf(a.t, expected, actual, msg, args...)
}

-// EqualValues asserts that two objects are equal or convertible to the same types
-// and equal.
+// EqualValues asserts that two objects are equal or convertible to the larger
+// type and equal.
//
//	a.EqualValues(uint32(123), int32(123))
func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
@@ -198,8 +198,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
	EqualValues(a.t, expected, actual, msgAndArgs...)
}

-// EqualValuesf asserts that two objects are equal or convertible to the same types
-// and equal.
+// EqualValuesf asserts that two objects are equal or convertible to the larger
+// type and equal.
//
//	a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
@@ -337,7 +337,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti
//	a.EventuallyWithT(func(c *assert.CollectT) {
//		// add assertions as needed; any assertion failure will fail the current tick
//		assert.True(c, externalValue, "expected 'externalValue' to be true")
-//	}, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
+//	}, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
	if h, ok := a.t.(tHelper); ok {
		h.Helper()
@@ -362,7 +362,7 @@ func (a *Assertions) EventuallyWithTf(condition func(collect *assert.CollectT), w
//	a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") {
//		// add assertions as needed; any assertion failure will fail the current tick
//		assert.True(c, externalValue, "expected 'externalValue' to be true")
-//	}, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
+//	}, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func (a *Assertions) EventuallyWithTf(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
	if h, ok := a.t.(tHelper); ok {
		h.Helper()
@@ -1129,6 +1129,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin
	NotContainsf(a.t, s, contains, msg, args...)
}

+// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
+//
+//	a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false
+//
+//	a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true
+//
+//	a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true
+func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NotElementsMatch(a.t, listA, listB, msgAndArgs...)
+}
+
+// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
+//
+//	a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false
+//
+//	a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true
+//
+//	a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true
+func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NotElementsMatchf(a.t, listA, listB, msg, args...)
+}
+
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
@@ -1201,7 +1235,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str
	NotEqualf(a.t, expected, actual, msg, args...)
}

-// NotErrorIs asserts that at none of the errors in err's chain matches target.
+// NotErrorAs asserts that none of the errors in err's chain matches target,
+// but if so, sets target to that error value.
+func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NotErrorAs(a.t, err, target, msgAndArgs...)
+}
+
+// NotErrorAsf asserts that none of the errors in err's chain matches target,
+// but if so, sets target to that error value.
+func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NotErrorAsf(a.t, err, target, msg, args...)
+}
+
+// NotErrorIs asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) {
	if h, ok := a.t.(tHelper); ok {
@@ -1210,7 +1262,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface
	NotErrorIs(a.t, err, target, msgAndArgs...)
}

-// NotErrorIsf asserts that at none of the errors in err's chain matches target.
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) {
	if h, ok := a.t.(tHelper); ok {
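Not part of the diff: a tiny, hypothetical usage note for the forwarded helpers added above. NotElementsMatch and the require.Assertions wrapper come from this testify release; the test body is illustrative only.

	package example_test

	import (
		"testing"

		"github.com/stretchr/testify/require"
	)

	func TestBatchContents(t *testing.T) {
		r := require.New(t)
		// Inverse of ElementsMatch: duplicate counts differ, so the lists must not match.
		r.NotElementsMatch([]int{1, 1, 2, 3}, []int{1, 2, 3})
		// Order is still ignored by ElementsMatch itself.
		r.ElementsMatch([]int{3, 2, 1}, []int{1, 2, 3})
	}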
2  vendor/github.com/stretchr/testify/require/requirements.go  generated vendored
@@ -6,7 +6,7 @@ type TestingT interface {
	FailNow()
}

-type tHelper interface {
+type tHelper = interface {
	Helper()
}
4  vendor/github.com/stretchr/testify/suite/doc.go  generated vendored
@@ -5,6 +5,8 @@
// or individual tests (depending on which interface(s) you
// implement).
//
+// The suite package does not support parallel tests. See [issue 934].
+//
// A testing suite is usually built by first extending the built-in
// suite functionality from suite.Suite in testify. Alternatively,
// you could reproduce that logic on your own if you wanted (you
@@ -63,4 +65,6 @@
//	func TestExampleTestSuite(t *testing.T) {
//		suite.Run(t, new(ExampleTestSuite))
//	}
+//
+// [issue 934]: https://github.com/stretchr/testify/issues/934
package suite
5  vendor/github.com/vbatts/tar-split/archive/tar/reader.go  generated vendored
@@ -56,6 +56,11 @@ func (tr *Reader) RawBytes() []byte {
}

+// ExpectedPadding returns the number of bytes of padding expected after the last header returned by Next()
+func (tr *Reader) ExpectedPadding() int64 {
+	return tr.pad
+}
+
// NewReader creates a new Reader reading from r.
func NewReader(r io.Reader) *Reader {
	return &Reader{r: r, curr: &regFileReader{r, 0}}
57  vendor/github.com/vbatts/tar-split/tar/asm/iterate.go  generated vendored Normal file
@@ -0,0 +1,57 @@
package asm

import (
	"bytes"
	"fmt"
	"io"

	"github.com/vbatts/tar-split/archive/tar"
	"github.com/vbatts/tar-split/tar/storage"
)

// IterateHeaders calls handler for each tar header provided by Unpacker
func IterateHeaders(unpacker storage.Unpacker, handler func(hdr *tar.Header) error) error {
	// We assume about NewInputTarStream:
	// - There is a separate SegmentType entry for every tar header, but only one SegmentType entry for the full header incl. any extensions
	// - (There is a FileType entry for every tar header, we ignore it)
	// - Trailing padding of a file, if any, is included in the next SegmentType entry
	// - At the end, there may be SegmentType entries just for the terminating zero blocks.

	var pendingPadding int64 = 0
	for {
		tsEntry, err := unpacker.Next()
		if err != nil {
			if err == io.EOF {
				return nil
			}
			return fmt.Errorf("reading tar-split entries: %w", err)
		}
		switch tsEntry.Type {
		case storage.SegmentType:
			payload := tsEntry.Payload
			if int64(len(payload)) < pendingPadding {
				return fmt.Errorf("expected %d bytes of padding after previous file, but next SegmentType only has %d bytes", pendingPadding, len(payload))
			}
			payload = payload[pendingPadding:]
			pendingPadding = 0

			tr := tar.NewReader(bytes.NewReader(payload))
			hdr, err := tr.Next()
			if err != nil {
				if err == io.EOF { // Probably the last entry, but let’s let the unpacker drive that.
					break
				}
				return fmt.Errorf("decoding a tar header from a tar-split entry: %w", err)
			}
			if err := handler(hdr); err != nil {
				return err
			}
			pendingPadding = tr.ExpectedPadding()

		case storage.FileType:
			// Nothing
		default:
			return fmt.Errorf("unexpected tar-split entry type %q", tsEntry.Type)
		}
	}
}
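Not part of the diff: a sketch of how the new IterateHeaders can list entries straight from a tar-split manifest, assuming the pre-existing storage.NewJSONUnpacker helper; the manifest path is a placeholder.

	package main

	import (
		"fmt"
		"os"

		"github.com/vbatts/tar-split/archive/tar"
		"github.com/vbatts/tar-split/tar/asm"
		"github.com/vbatts/tar-split/tar/storage"
	)

	func main() {
		// layer.tar-split.json stands in for a real tar-split manifest produced by NewInputTarStream.
		f, err := os.Open("layer.tar-split.json")
		if err != nil {
			panic(err)
		}
		defer f.Close()

		// Walk the recorded headers without reassembling or re-reading the layer tar itself.
		if err := asm.IterateHeaders(storage.NewJSONUnpacker(f), func(hdr *tar.Header) error {
			fmt.Printf("%s\t%d bytes\n", hdr.Name, hdr.Size)
			return nil
		}); err != nil {
			panic(err)
		}
	}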
6  vendor/k8s.io/klog/v2/.golangci.yaml  generated vendored Normal file
@@ -0,0 +1,6 @@
linters:
  disable-all: true
  enable: # sorted alphabetical
    - gofmt
    - misspell
    - revive
4  vendor/k8s.io/klog/v2/OWNERS  generated vendored
@@ -1,14 +1,16 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
  - harshanarayana
  - mengjiao-liu
  - pohly
approvers:
  - dims
  - pohly
  - thockin
  - serathius
emeritus_approvers:
  - brancz
  - justinsb
  - lavalamp
  - piosz
  - serathius
  - tallclair
31  vendor/k8s.io/klog/v2/contextual_slog.go  generated vendored Normal file
@@ -0,0 +1,31 @@
|
||||
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2021 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package klog
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
||||
// SetSlogLogger reconfigures klog to log through the slog logger. The logger must not be nil.
|
||||
func SetSlogLogger(logger *slog.Logger) {
|
||||
SetLoggerWithOptions(logr.FromSlogHandler(logger.Handler()), ContextualLogger(true))
|
||||
}
|
||||
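Not part of the diff: a minimal sketch of the new SetSlogLogger entry point added above (Go 1.21+), routing klog output through a standard-library slog handler.

	package main

	import (
		"log/slog"
		"os"

		"k8s.io/klog/v2"
	)

	func main() {
		// All klog calls below are emitted through the slog JSON handler.
		klog.SetSlogLogger(slog.New(slog.NewJSONHandler(os.Stderr, nil)))
		klog.InfoS("daemon starting", "pid", os.Getpid())
	}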
65  vendor/k8s.io/klog/v2/format.go  generated vendored Normal file
@@ -0,0 +1,65 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package klog
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
||||
// Format wraps a value of an arbitrary type and implement fmt.Stringer and
|
||||
// logr.Marshaler for them. Stringer returns pretty-printed JSON. MarshalLog
|
||||
// returns the original value with a type that has no special methods, in
|
||||
// particular no MarshalLog or MarshalJSON.
|
||||
//
|
||||
// Wrapping values like that is useful when the value has a broken
|
||||
// implementation of these special functions (for example, a type which
|
||||
// inherits String from TypeMeta, but then doesn't re-implement String) or the
|
||||
// implementation produces output that is less readable or unstructured (for
|
||||
// example, the generated String functions for Kubernetes API types).
|
||||
func Format(obj interface{}) interface{} {
|
||||
return formatAny{Object: obj}
|
||||
}
|
||||
|
||||
type formatAny struct {
|
||||
Object interface{}
|
||||
}
|
||||
|
||||
func (f formatAny) String() string {
|
||||
var buffer strings.Builder
|
||||
encoder := json.NewEncoder(&buffer)
|
||||
encoder.SetIndent("", " ")
|
||||
if err := encoder.Encode(&f.Object); err != nil {
|
||||
return fmt.Sprintf("error marshaling %T to JSON: %v", f, err)
|
||||
}
|
||||
return buffer.String()
|
||||
}
|
||||
|
||||
func (f formatAny) MarshalLog() interface{} {
|
||||
// Returning a pointer to a pointer ensures that zapr doesn't find a
|
||||
// fmt.Stringer or logr.Marshaler when it checks the type of the
|
||||
// value. It then falls back to reflection, which dumps the value being
|
||||
// pointed to (JSON doesn't have pointers).
|
||||
ptr := &f.Object
|
||||
return &ptr
|
||||
}
|
||||
|
||||
var _ fmt.Stringer = formatAny{}
|
||||
var _ logr.Marshaler = formatAny{}
|
||||
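Not part of the diff: a sketch of using the new klog.Format wrapper from the file above; the Config type is illustrative.

	package main

	import "k8s.io/klog/v2"

	type Config struct {
		Image    string
		Replicas int
	}

	func main() {
		cfg := Config{Image: "nginx:1.27", Replicas: 3}
		// Format defers rendering: the value is pretty-printed as JSON when the entry is
		// emitted, bypassing any broken String or MarshalJSON methods on the value itself.
		klog.InfoS("applying configuration", "config", klog.Format(cfg))
	}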
12  vendor/k8s.io/klog/v2/internal/buffer/buffer.go  generated vendored
@@ -30,14 +30,16 @@ import (
|
||||
var (
|
||||
// Pid is inserted into log headers. Can be overridden for tests.
|
||||
Pid = os.Getpid()
|
||||
|
||||
// Time, if set, will be used instead of the actual current time.
|
||||
Time *time.Time
|
||||
)
|
||||
|
||||
// Buffer holds a single byte.Buffer for reuse. The zero value is ready for
|
||||
// use. It also provides some helper methods for output formatting.
|
||||
type Buffer struct {
|
||||
bytes.Buffer
|
||||
Tmp [64]byte // temporary byte array for creating headers.
|
||||
next *Buffer
|
||||
Tmp [64]byte // temporary byte array for creating headers.
|
||||
}
|
||||
|
||||
var buffers = sync.Pool{
|
||||
@@ -122,6 +124,9 @@ func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now
|
||||
|
||||
// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
|
||||
// It's worth about 3X. Fprintf is hard.
|
||||
if Time != nil {
|
||||
now = *Time
|
||||
}
|
||||
_, month, day := now.Date()
|
||||
hour, minute, second := now.Clock()
|
||||
// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
|
||||
@@ -157,6 +162,9 @@ func (buf *Buffer) SprintHeader(s severity.Severity, now time.Time) string {
|
||||
|
||||
// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
|
||||
// It's worth about 3X. Fprintf is hard.
|
||||
if Time != nil {
|
||||
now = *Time
|
||||
}
|
||||
_, month, day := now.Date()
|
||||
hour, minute, second := now.Clock()
|
||||
// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
|
||||
|
||||
21  vendor/k8s.io/klog/v2/internal/clock/clock.go  generated vendored
@@ -39,16 +39,6 @@ type Clock interface {
|
||||
// Sleep sleeps for the provided duration d.
|
||||
// Consider making the sleep interruptible by using 'select' on a context channel and a timer channel.
|
||||
Sleep(d time.Duration)
|
||||
// Tick returns the channel of a new Ticker.
|
||||
// This method does not allow to free/GC the backing ticker. Use
|
||||
// NewTicker from WithTicker instead.
|
||||
Tick(d time.Duration) <-chan time.Time
|
||||
}
|
||||
|
||||
// WithTicker allows for injecting fake or real clocks into code that
|
||||
// needs to do arbitrary things based on time.
|
||||
type WithTicker interface {
|
||||
Clock
|
||||
// NewTicker returns a new Ticker.
|
||||
NewTicker(time.Duration) Ticker
|
||||
}
|
||||
@@ -66,7 +56,7 @@ type WithDelayedExecution interface {
|
||||
// WithTickerAndDelayedExecution allows for injecting fake or real clocks
|
||||
// into code that needs Ticker and AfterFunc functionality
|
||||
type WithTickerAndDelayedExecution interface {
|
||||
WithTicker
|
||||
Clock
|
||||
// AfterFunc executes f in its own goroutine after waiting
|
||||
// for d duration and returns a Timer whose channel can be
|
||||
// closed by calling Stop() on the Timer.
|
||||
@@ -79,7 +69,7 @@ type Ticker interface {
|
||||
Stop()
|
||||
}
|
||||
|
||||
var _ = WithTicker(RealClock{})
|
||||
var _ Clock = RealClock{}
|
||||
|
||||
// RealClock really calls time.Now()
|
||||
type RealClock struct{}
|
||||
@@ -115,13 +105,6 @@ func (RealClock) AfterFunc(d time.Duration, f func()) Timer {
|
||||
}
|
||||
}
|
||||
|
||||
// Tick is the same as time.Tick(d)
|
||||
// This method does not allow to free/GC the backing ticker. Use
|
||||
// NewTicker instead.
|
||||
func (RealClock) Tick(d time.Duration) <-chan time.Time {
|
||||
return time.Tick(d)
|
||||
}
|
||||
|
||||
// NewTicker returns a new Ticker.
|
||||
func (RealClock) NewTicker(d time.Duration) Ticker {
|
||||
return &realTicker{
|
||||
|
||||
106  vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go  generated vendored
@@ -18,6 +18,7 @@ package serialize
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
@@ -171,83 +172,33 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
|
||||
Formatter{}.KVListFormat(b, keysAndValues...)
|
||||
}
|
||||
|
||||
// KVFormat serializes one key/value pair into the provided buffer.
|
||||
// A space gets inserted before the pair.
|
||||
func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) {
|
||||
b.WriteByte(' ')
|
||||
// Keys are assumed to be well-formed according to
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments
|
||||
// for the sake of performance. Keys with spaces,
|
||||
// special characters, etc. will break parsing.
|
||||
if sK, ok := k.(string); ok {
|
||||
// Avoid one allocation when the key is a string, which
|
||||
// normally it should be.
|
||||
b.WriteString(sK)
|
||||
} else {
|
||||
b.WriteString(fmt.Sprintf("%s", k))
|
||||
}
|
||||
|
||||
// The type checks are sorted so that more frequently used ones
|
||||
// come first because that is then faster in the common
|
||||
// cases. In Kubernetes, ObjectRef (a Stringer) is more common
|
||||
// than plain strings
|
||||
// (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235).
|
||||
switch v := v.(type) {
|
||||
case textWriter:
|
||||
writeTextWriterValue(b, v)
|
||||
case fmt.Stringer:
|
||||
writeStringValue(b, true, StringerToString(v))
|
||||
case string:
|
||||
writeStringValue(b, true, v)
|
||||
case error:
|
||||
writeStringValue(b, true, ErrorToString(v))
|
||||
case logr.Marshaler:
|
||||
value := MarshalerToValue(v)
|
||||
// A marshaler that returns a string is useful for
|
||||
// delayed formatting of complex values. We treat this
|
||||
// case like a normal string. This is useful for
|
||||
// multi-line support.
|
||||
//
|
||||
// We could do this by recursively formatting a value,
|
||||
// but that comes with the risk of infinite recursion
|
||||
// if a marshaler returns itself. Instead we call it
|
||||
// only once and rely on it returning the intended
|
||||
// value directly.
|
||||
switch value := value.(type) {
|
||||
case string:
|
||||
writeStringValue(b, true, value)
|
||||
default:
|
||||
writeStringValue(b, false, f.AnyToString(value))
|
||||
}
|
||||
case []byte:
|
||||
// In https://github.com/kubernetes/klog/pull/237 it was decided
|
||||
// to format byte slices with "%+q". The advantages of that are:
|
||||
// - readable output if the bytes happen to be printable
|
||||
// - non-printable bytes get represented as unicode escape
|
||||
// sequences (\uxxxx)
|
||||
//
|
||||
// The downsides are that we cannot use the faster
|
||||
// strconv.Quote here and that multi-line output is not
|
||||
// supported. If developers know that a byte array is
|
||||
// printable and they want multi-line output, they can
|
||||
// convert the value to string before logging it.
|
||||
b.WriteByte('=')
|
||||
b.WriteString(fmt.Sprintf("%+q", v))
|
||||
default:
|
||||
writeStringValue(b, false, f.AnyToString(v))
|
||||
}
|
||||
}
|
||||
|
||||
func KVFormat(b *bytes.Buffer, k, v interface{}) {
|
||||
Formatter{}.KVFormat(b, k, v)
|
||||
}
|
||||
|
||||
// AnyToString is the historic fallback formatter.
|
||||
func (f Formatter) AnyToString(v interface{}) string {
|
||||
// formatAny is the fallback formatter for a value. It supports a hook (for
|
||||
// example, for YAML encoding) and itself uses JSON encoding.
|
||||
func (f Formatter) formatAny(b *bytes.Buffer, v interface{}) {
|
||||
b.WriteRune('=')
|
||||
if f.AnyToStringHook != nil {
|
||||
return f.AnyToStringHook(v)
|
||||
b.WriteString(f.AnyToStringHook(v))
|
||||
return
|
||||
}
|
||||
return fmt.Sprintf("%+v", v)
|
||||
formatAsJSON(b, v)
|
||||
}
|
||||
|
||||
func formatAsJSON(b *bytes.Buffer, v interface{}) {
|
||||
encoder := json.NewEncoder(b)
|
||||
l := b.Len()
|
||||
if err := encoder.Encode(v); err != nil {
|
||||
// This shouldn't happen. We discard whatever the encoder
|
||||
// wrote and instead dump an error string.
|
||||
b.Truncate(l)
|
||||
b.WriteString(fmt.Sprintf(`"<internal error: %v>"`, err))
|
||||
return
|
||||
}
|
||||
// Remove trailing newline.
|
||||
b.Truncate(b.Len() - 1)
|
||||
}
|
||||
|
||||
// StringerToString converts a Stringer to a string,
|
||||
@@ -287,7 +238,7 @@ func ErrorToString(err error) (ret string) {
|
||||
}
|
||||
|
||||
func writeTextWriterValue(b *bytes.Buffer, v textWriter) {
|
||||
b.WriteRune('=')
|
||||
b.WriteByte('=')
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
fmt.Fprintf(b, `"<panic: %s>"`, err)
|
||||
@@ -296,18 +247,13 @@ func writeTextWriterValue(b *bytes.Buffer, v textWriter) {
|
||||
v.WriteText(b)
|
||||
}
|
||||
|
||||
func writeStringValue(b *bytes.Buffer, quote bool, v string) {
|
||||
func writeStringValue(b *bytes.Buffer, v string) {
|
||||
data := []byte(v)
|
||||
index := bytes.IndexByte(data, '\n')
|
||||
if index == -1 {
|
||||
b.WriteByte('=')
|
||||
if quote {
|
||||
// Simple string, quote quotation marks and non-printable characters.
|
||||
b.WriteString(strconv.Quote(v))
|
||||
return
|
||||
}
|
||||
// Non-string with no line breaks.
|
||||
b.WriteString(v)
|
||||
// Simple string, quote quotation marks and non-printable characters.
|
||||
b.WriteString(strconv.Quote(v))
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
97  vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go  generated vendored Normal file
@@ -0,0 +1,97 @@
|
||||
//go:build !go1.21
|
||||
// +build !go1.21
|
||||
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package serialize
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
||||
// KVFormat serializes one key/value pair into the provided buffer.
|
||||
// A space gets inserted before the pair.
|
||||
func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) {
|
||||
// This is the version without slog support. Must be kept in sync with
|
||||
// the version in keyvalues_slog.go.
|
||||
|
||||
b.WriteByte(' ')
|
||||
// Keys are assumed to be well-formed according to
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments
|
||||
// for the sake of performance. Keys with spaces,
|
||||
// special characters, etc. will break parsing.
|
||||
if sK, ok := k.(string); ok {
|
||||
// Avoid one allocation when the key is a string, which
|
||||
// normally it should be.
|
||||
b.WriteString(sK)
|
||||
} else {
|
||||
b.WriteString(fmt.Sprintf("%s", k))
|
||||
}
|
||||
|
||||
// The type checks are sorted so that more frequently used ones
|
||||
// come first because that is then faster in the common
|
||||
// cases. In Kubernetes, ObjectRef (a Stringer) is more common
|
||||
// than plain strings
|
||||
// (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235).
|
||||
switch v := v.(type) {
|
||||
case textWriter:
|
||||
writeTextWriterValue(b, v)
|
||||
case fmt.Stringer:
|
||||
writeStringValue(b, StringerToString(v))
|
||||
case string:
|
||||
writeStringValue(b, v)
|
||||
case error:
|
||||
writeStringValue(b, ErrorToString(v))
|
||||
case logr.Marshaler:
|
||||
value := MarshalerToValue(v)
|
||||
// A marshaler that returns a string is useful for
|
||||
// delayed formatting of complex values. We treat this
|
||||
// case like a normal string. This is useful for
|
||||
// multi-line support.
|
||||
//
|
||||
// We could do this by recursively formatting a value,
|
||||
// but that comes with the risk of infinite recursion
|
||||
// if a marshaler returns itself. Instead we call it
|
||||
// only once and rely on it returning the intended
|
||||
// value directly.
|
||||
switch value := value.(type) {
|
||||
case string:
|
||||
writeStringValue(b, value)
|
||||
default:
|
||||
f.formatAny(b, value)
|
||||
}
|
||||
case []byte:
|
||||
// In https://github.com/kubernetes/klog/pull/237 it was decided
|
||||
// to format byte slices with "%+q". The advantages of that are:
|
||||
// - readable output if the bytes happen to be printable
|
||||
// - non-printable bytes get represented as unicode escape
|
||||
// sequences (\uxxxx)
|
||||
//
|
||||
// The downsides are that we cannot use the faster
|
||||
// strconv.Quote here and that multi-line output is not
|
||||
// supported. If developers know that a byte array is
|
||||
// printable and they want multi-line output, they can
|
||||
// convert the value to string before logging it.
|
||||
b.WriteByte('=')
|
||||
b.WriteString(fmt.Sprintf("%+q", v))
|
||||
default:
|
||||
f.formatAny(b, v)
|
||||
}
|
||||
}
|
||||
155  vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go  generated vendored Normal file
@@ -0,0 +1,155 @@
|
||||
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package serialize
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"strconv"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
||||
// KVFormat serializes one key/value pair into the provided buffer.
|
||||
// A space gets inserted before the pair.
|
||||
func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) {
|
||||
// This is the version without slog support. Must be kept in sync with
|
||||
// the version in keyvalues_slog.go.
|
||||
|
||||
b.WriteByte(' ')
|
||||
// Keys are assumed to be well-formed according to
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments
|
||||
// for the sake of performance. Keys with spaces,
|
||||
// special characters, etc. will break parsing.
|
||||
if sK, ok := k.(string); ok {
|
||||
// Avoid one allocation when the key is a string, which
|
||||
// normally it should be.
|
||||
b.WriteString(sK)
|
||||
} else {
|
||||
b.WriteString(fmt.Sprintf("%s", k))
|
||||
}
|
||||
|
||||
// The type checks are sorted so that more frequently used ones
|
||||
// come first because that is then faster in the common
|
||||
// cases. In Kubernetes, ObjectRef (a Stringer) is more common
|
||||
// than plain strings
|
||||
// (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235).
|
||||
//
|
||||
// slog.LogValuer does not need to be handled here because the handler will
|
||||
// already have resolved such special values to the final value for logging.
|
||||
switch v := v.(type) {
|
||||
case textWriter:
|
||||
writeTextWriterValue(b, v)
|
||||
case slog.Value:
|
||||
// This must come before fmt.Stringer because slog.Value implements
|
||||
// fmt.Stringer, but does not produce the output that we want.
|
||||
b.WriteByte('=')
|
||||
generateJSON(b, v)
|
||||
case fmt.Stringer:
|
||||
writeStringValue(b, StringerToString(v))
|
||||
case string:
|
||||
writeStringValue(b, v)
|
||||
case error:
|
||||
writeStringValue(b, ErrorToString(v))
|
||||
case logr.Marshaler:
|
||||
value := MarshalerToValue(v)
|
||||
// A marshaler that returns a string is useful for
|
||||
// delayed formatting of complex values. We treat this
|
||||
// case like a normal string. This is useful for
|
||||
// multi-line support.
|
||||
//
|
||||
// We could do this by recursively formatting a value,
|
||||
// but that comes with the risk of infinite recursion
|
||||
// if a marshaler returns itself. Instead we call it
|
||||
// only once and rely on it returning the intended
|
||||
// value directly.
|
||||
switch value := value.(type) {
|
||||
case string:
|
||||
writeStringValue(b, value)
|
||||
default:
|
||||
f.formatAny(b, value)
|
||||
}
|
||||
case slog.LogValuer:
|
||||
value := slog.AnyValue(v).Resolve()
|
||||
if value.Kind() == slog.KindString {
|
||||
writeStringValue(b, value.String())
|
||||
} else {
|
||||
b.WriteByte('=')
|
||||
generateJSON(b, value)
|
||||
}
|
||||
case []byte:
|
||||
// In https://github.com/kubernetes/klog/pull/237 it was decided
|
||||
// to format byte slices with "%+q". The advantages of that are:
|
||||
// - readable output if the bytes happen to be printable
|
||||
// - non-printable bytes get represented as unicode escape
|
||||
// sequences (\uxxxx)
|
||||
//
|
||||
// The downsides are that we cannot use the faster
|
||||
// strconv.Quote here and that multi-line output is not
|
||||
// supported. If developers know that a byte array is
|
||||
// printable and they want multi-line output, they can
|
||||
// convert the value to string before logging it.
|
||||
b.WriteByte('=')
|
||||
b.WriteString(fmt.Sprintf("%+q", v))
|
||||
default:
|
||||
f.formatAny(b, v)
|
||||
}
|
||||
}
|
||||
|
||||
// generateJSON has the same preference for plain strings as KVFormat.
|
||||
// In contrast to KVFormat it always produces valid JSON with no line breaks.
|
||||
func generateJSON(b *bytes.Buffer, v interface{}) {
|
||||
switch v := v.(type) {
|
||||
case slog.Value:
|
||||
switch v.Kind() {
|
||||
case slog.KindGroup:
|
||||
// Format as a JSON group. We must not involve f.AnyToStringHook (if there is any),
|
||||
// because there is no guarantee that it produces valid JSON.
|
||||
b.WriteByte('{')
|
||||
for i, attr := range v.Group() {
|
||||
if i > 0 {
|
||||
b.WriteByte(',')
|
||||
}
|
||||
b.WriteString(strconv.Quote(attr.Key))
|
||||
b.WriteByte(':')
|
||||
generateJSON(b, attr.Value)
|
||||
}
|
||||
b.WriteByte('}')
|
||||
case slog.KindLogValuer:
|
||||
generateJSON(b, v.Resolve())
|
||||
default:
|
||||
// Peel off the slog.Value wrapper and format the actual value.
|
||||
generateJSON(b, v.Any())
|
||||
}
|
||||
case fmt.Stringer:
|
||||
b.WriteString(strconv.Quote(StringerToString(v)))
|
||||
case logr.Marshaler:
|
||||
generateJSON(b, MarshalerToValue(v))
|
||||
case slog.LogValuer:
|
||||
generateJSON(b, slog.AnyValue(v).Resolve().Any())
|
||||
case string:
|
||||
b.WriteString(strconv.Quote(v))
|
||||
case error:
|
||||
b.WriteString(strconv.Quote(v.Error()))
|
||||
default:
|
||||
formatAsJSON(b, v)
|
||||
}
|
||||
}
|
||||
96  vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go  generated vendored Normal file
@@ -0,0 +1,96 @@
|
||||
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package sloghandler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/klog/v2/internal/severity"
|
||||
)
|
||||
|
||||
func Handle(_ context.Context, record slog.Record, groups string, printWithInfos func(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{})) error {
|
||||
now := record.Time
|
||||
if now.IsZero() {
|
||||
// This format doesn't support printing entries without a time.
|
||||
now = time.Now()
|
||||
}
|
||||
|
||||
// slog has numeric severity levels, with 0 as default "info", negative for debugging, and
|
||||
// positive with some pre-defined levels for more important. Those ranges get mapped to
|
||||
// the corresponding klog levels where possible, with "info" the default that is used
|
||||
// also for negative debug levels.
|
||||
level := record.Level
|
||||
s := severity.InfoLog
|
||||
switch {
|
||||
case level >= slog.LevelError:
|
||||
s = severity.ErrorLog
|
||||
case level >= slog.LevelWarn:
|
||||
s = severity.WarningLog
|
||||
}
|
||||
|
||||
var file string
|
||||
var line int
|
||||
if record.PC != 0 {
|
||||
// Same as https://cs.opensource.google/go/x/exp/+/642cacee:slog/record.go;drc=642cacee5cc05231f45555a333d07f1005ffc287;l=70
|
||||
fs := runtime.CallersFrames([]uintptr{record.PC})
|
||||
f, _ := fs.Next()
|
||||
if f.File != "" {
|
||||
file = f.File
|
||||
if slash := strings.LastIndex(file, "/"); slash >= 0 {
|
||||
file = file[slash+1:]
|
||||
}
|
||||
line = f.Line
|
||||
}
|
||||
} else {
|
||||
file = "???"
|
||||
line = 1
|
||||
}
|
||||
|
||||
kvList := make([]interface{}, 0, 2*record.NumAttrs())
|
||||
record.Attrs(func(attr slog.Attr) bool {
|
||||
kvList = appendAttr(groups, kvList, attr)
|
||||
return true
|
||||
})
|
||||
|
||||
printWithInfos(file, line, now, nil, s, record.Message, kvList)
|
||||
return nil
|
||||
}
|
||||
|
||||
func Attrs2KVList(groups string, attrs []slog.Attr) []interface{} {
|
||||
kvList := make([]interface{}, 0, 2*len(attrs))
|
||||
for _, attr := range attrs {
|
||||
kvList = appendAttr(groups, kvList, attr)
|
||||
}
|
||||
return kvList
|
||||
}
|
||||
|
||||
func appendAttr(groups string, kvList []interface{}, attr slog.Attr) []interface{} {
|
||||
var key string
|
||||
if groups != "" {
|
||||
key = groups + "." + attr.Key
|
||||
} else {
|
||||
key = attr.Key
|
||||
}
|
||||
return append(kvList, key, attr.Value)
|
||||
}
|
||||
12  vendor/k8s.io/klog/v2/k8s_references.go  generated vendored
@@ -178,14 +178,14 @@ func (ks kobjSlice) process() (objs []interface{}, err string) {
|
||||
return objectRefs, ""
|
||||
}
|
||||
|
||||
var nilToken = []byte("<nil>")
|
||||
var nilToken = []byte("null")
|
||||
|
||||
func (ks kobjSlice) WriteText(out *bytes.Buffer) {
|
||||
s := reflect.ValueOf(ks.arg)
|
||||
switch s.Kind() {
|
||||
case reflect.Invalid:
|
||||
// nil parameter, print as empty slice.
|
||||
out.WriteString("[]")
|
||||
// nil parameter, print as null.
|
||||
out.Write(nilToken)
|
||||
return
|
||||
case reflect.Slice:
|
||||
// Okay, handle below.
|
||||
@@ -197,15 +197,15 @@ func (ks kobjSlice) WriteText(out *bytes.Buffer) {
|
||||
defer out.Write([]byte{']'})
|
||||
for i := 0; i < s.Len(); i++ {
|
||||
if i > 0 {
|
||||
out.Write([]byte{' '})
|
||||
out.Write([]byte{','})
|
||||
}
|
||||
item := s.Index(i).Interface()
|
||||
if item == nil {
|
||||
out.Write(nilToken)
|
||||
} else if v, ok := item.(KMetadata); ok {
|
||||
KObj(v).writeUnquoted(out)
|
||||
KObj(v).WriteText(out)
|
||||
} else {
|
||||
fmt.Fprintf(out, "<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
|
||||
fmt.Fprintf(out, `"<KObjSlice needs a slice of values implementing KMetadata, got type %T>"`, item)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
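Not part of the diff: a small sketch of where the kobjSlice text changes above surface, via the public klog.KObjSlice helper; the obj type below is a stand-in for any KMetadata implementation.

	package main

	import "k8s.io/klog/v2"

	// obj is a minimal stand-in for a Kubernetes object; it satisfies klog.KMetadata.
	type obj struct{ name, ns string }

	func (o obj) GetName() string      { return o.name }
	func (o obj) GetNamespace() string { return o.ns }

	func main() {
		pods := []obj{{name: "web-0", ns: "default"}, {name: "web-1", ns: "default"}}
		// With this klog update the text output separates slice items with commas and
		// prints nil entries as "null" instead of "<nil>".
		klog.InfoS("evicting pods", "pods", klog.KObjSlice(pods))
	}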
39  vendor/k8s.io/klog/v2/k8s_references_slog.go  generated vendored Normal file
@@ -0,0 +1,39 @@
|
||||
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2021 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package klog
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
)
|
||||
|
||||
func (ref ObjectRef) LogValue() slog.Value {
|
||||
if ref.Namespace != "" {
|
||||
return slog.GroupValue(slog.String("name", ref.Name), slog.String("namespace", ref.Namespace))
|
||||
}
|
||||
return slog.GroupValue(slog.String("name", ref.Name))
|
||||
}
|
||||
|
||||
var _ slog.LogValuer = ObjectRef{}
|
||||
|
||||
func (ks kobjSlice) LogValue() slog.Value {
|
||||
return slog.AnyValue(ks.MarshalLog())
|
||||
}
|
||||
|
||||
var _ slog.LogValuer = kobjSlice{}
|
||||
174  vendor/k8s.io/klog/v2/klog.go  generated vendored
@@ -14,9 +14,26 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
|
||||
// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
|
||||
// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
|
||||
// Package klog contains the following functionality:
|
||||
//
|
||||
// - output routing as defined via command line flags ([InitFlags])
|
||||
// - log formatting as text, either with a single, unstructured string ([Info], [Infof], etc.)
|
||||
// or as a structured log entry with message and key/value pairs ([InfoS], etc.)
|
||||
// - management of a go-logr [Logger] ([SetLogger], [Background], [TODO])
|
||||
// - helper functions for logging values ([Format]) and managing the state of klog ([CaptureState], [State.Restore])
|
||||
// - wrappers for [logr] APIs for contextual logging where the wrappers can
|
||||
// be turned into no-ops ([EnableContextualLogging], [NewContext], [FromContext],
|
||||
// [LoggerWithValues], [LoggerWithName]); if the ability to turn off
|
||||
// contextual logging is not needed, then go-logr can also be used directly
|
||||
// - type aliases for go-logr types to simplify imports in code which uses both (e.g. [Logger])
|
||||
// - [k8s.io/klog/v2/textlogger]: a logger which uses the same formatting as klog log with
|
||||
// simpler output routing; beware that it comes with its own command line flags
|
||||
// and does not use the ones from klog
|
||||
// - [k8s.io/klog/v2/ktesting]: per-test output in Go unit tests
|
||||
// - [k8s.io/klog/v2/klogr]: a deprecated, standalone [logr.Logger] on top of the main klog package;
|
||||
// use [Background] instead if klog output routing is needed, [k8s.io/klog/v2/textlogger] if not
|
||||
// - [k8s.io/klog/v2/examples]: demos of this functionality
|
||||
// - [k8s.io/klog/v2/test]: reusable tests for [logr.Logger] implementations
|
||||
//
|
||||
// Basic examples:
|
||||
//
|
||||
@@ -387,13 +404,6 @@ func (t *traceLocation) Set(value string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// flushSyncWriter is the interface satisfied by logging destinations.
|
||||
type flushSyncWriter interface {
|
||||
Flush() error
|
||||
Sync() error
|
||||
io.Writer
|
||||
}
|
||||
|
||||
var logging loggingT
|
||||
var commandLine flag.FlagSet
|
||||
|
||||
@@ -415,7 +425,7 @@ func init() {
|
||||
logging.stderrThreshold = severityValue{
|
||||
Severity: severity.ErrorLog, // Default stderrThreshold is ERROR.
|
||||
}
|
||||
commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=false)")
|
||||
commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true)")
|
||||
commandLine.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
|
||||
commandLine.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
|
||||
|
||||
@@ -469,7 +479,7 @@ type settings struct {
|
||||
// Access to all of the following fields must be protected via a mutex.
|
||||
|
||||
// file holds writer for each of the log types.
|
||||
file [severity.NumSeverity]flushSyncWriter
|
||||
file [severity.NumSeverity]io.Writer
|
||||
// flushInterval is the interval for periodic flushing. If zero,
|
||||
// the global default will be used.
|
||||
flushInterval time.Duration
|
||||
@@ -518,9 +528,7 @@ type settings struct {
|
||||
func (s settings) deepCopy() settings {
|
||||
// vmodule is a slice and would be shared, so we have copy it.
|
||||
filter := make([]modulePat, len(s.vmodule.filter))
|
||||
for i := range s.vmodule.filter {
|
||||
filter[i] = s.vmodule.filter[i]
|
||||
}
|
||||
copy(filter, s.vmodule.filter)
|
||||
s.vmodule.filter = filter
|
||||
|
||||
if s.logger != nil {
|
||||
@@ -657,16 +665,15 @@ func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, strin
|
||||
}
|
||||
}
|
||||
}
|
||||
return l.formatHeader(s, file, line), file, line
|
||||
return l.formatHeader(s, file, line, timeNow()), file, line
|
||||
}
|
||||
|
||||
// formatHeader formats a log header using the provided file name and line number.
|
||||
func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer {
|
||||
func (l *loggingT) formatHeader(s severity.Severity, file string, line int, now time.Time) *buffer.Buffer {
|
||||
buf := buffer.GetBuffer()
|
||||
if l.skipHeaders {
|
||||
return buf
|
||||
}
|
||||
now := timeNow()
|
||||
buf.FormatHeader(s, file, line, now)
|
||||
return buf
|
||||
}
|
||||
@@ -676,6 +683,10 @@ func (l *loggingT) println(s severity.Severity, logger *logWriter, filter LogFil
|
||||
}
|
||||
|
||||
func (l *loggingT) printlnDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) {
|
||||
if false {
|
||||
_ = fmt.Sprintln(args...) // cause vet to treat this function like fmt.Println
|
||||
}
|
||||
|
||||
buf, file, line := l.header(s, depth)
|
||||
// If a logger is set and doesn't support writing a formatted buffer,
|
||||
// we clear the generated header as we rely on the backing
|
||||
@@ -696,7 +707,15 @@ func (l *loggingT) print(s severity.Severity, logger *logWriter, filter LogFilte
|
||||
}
|
||||
|
||||
func (l *loggingT) printDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) {
|
||||
if false {
|
||||
_ = fmt.Sprint(args...) // // cause vet to treat this function like fmt.Print
|
||||
}
|
||||
|
||||
buf, file, line := l.header(s, depth)
|
||||
l.printWithInfos(buf, file, line, s, logger, filter, depth+1, args...)
|
||||
}
|
||||
|
||||
func (l *loggingT) printWithInfos(buf *buffer.Buffer, file string, line int, s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) {
|
||||
// If a logger is set and doesn't support writing a formatted buffer,
|
||||
// we clear the generated header as we rely on the backing
|
||||
// logger implementation to print headers.
|
||||
@@ -719,6 +738,10 @@ func (l *loggingT) printf(s severity.Severity, logger *logWriter, filter LogFilt
|
||||
}
|
||||
|
||||
func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, format string, args ...interface{}) {
|
||||
if false {
|
||||
_ = fmt.Sprintf(format, args...) // cause vet to treat this function like fmt.Printf
|
||||
}
|
||||
|
||||
buf, file, line := l.header(s, depth)
|
||||
// If a logger is set and doesn't support writing a formatted buffer,
|
||||
// we clear the generated header as we rely on the backing
|
||||
@@ -741,7 +764,7 @@ func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter Lo
|
||||
// alsoLogToStderr is true, the log message always appears on standard error; it
|
||||
// will also appear in the log file unless --logtostderr is set.
|
||||
func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) {
|
||||
buf := l.formatHeader(s, file, line)
|
||||
buf := l.formatHeader(s, file, line, timeNow())
|
||||
// If a logger is set and doesn't support writing a formatted buffer,
|
||||
// we clear the generated header as we rely on the backing
|
||||
// logger implementation to print headers.
|
||||
@@ -759,7 +782,7 @@ func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, fil
|
||||
l.output(s, logger, buf, 2 /* depth */, file, line, alsoToStderr)
|
||||
}
|
||||
|
||||
// if loggr is specified, will call loggr.Error, otherwise output with logging module.
|
||||
// if logger is specified, will call logger.Error, otherwise output with logging module.
|
||||
func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) {
|
||||
if filter != nil {
|
||||
msg, keysAndValues = filter.FilterS(msg, keysAndValues)
|
||||
@@ -771,7 +794,7 @@ func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth
|
||||
l.printS(err, severity.ErrorLog, depth+1, msg, keysAndValues...)
|
||||
}
|
||||
|
||||
// if loggr is specified, will call loggr.Info, otherwise output with logging module.
|
||||
// if logger is specified, will call logger.Info, otherwise output with logging module.
|
||||
func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) {
|
||||
if filter != nil {
|
||||
msg, keysAndValues = filter.FilterS(msg, keysAndValues)
|
||||
@@ -783,7 +806,7 @@ func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg str
|
||||
l.printS(nil, severity.InfoLog, depth+1, msg, keysAndValues...)
|
||||
}
|
||||
|
||||
// printS is called from infoS and errorS if loggr is not specified.
|
||||
// printS is called from infoS and errorS if logger is not specified.
|
||||
// set log severity by s
|
||||
func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) {
|
||||
// Only create a new buffer if we don't have one cached.
|
||||
@@ -796,37 +819,17 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string,
|
||||
serialize.KVListFormat(&b.Buffer, "err", err)
|
||||
}
|
||||
serialize.KVListFormat(&b.Buffer, keysAndValues...)
|
||||
l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer)
|
||||
l.printDepth(s, nil, nil, depth+1, &b.Buffer)
|
||||
// Make the buffer available for reuse.
|
||||
buffer.PutBuffer(b)
|
||||
}
|
||||
|
||||
// redirectBuffer is used to set an alternate destination for the logs
|
||||
type redirectBuffer struct {
|
||||
w io.Writer
|
||||
}
|
||||
|
||||
func (rb *redirectBuffer) Sync() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rb *redirectBuffer) Flush() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) {
|
||||
return rb.w.Write(bytes)
|
||||
}
|
||||
|
||||
// SetOutput sets the output destination for all severities
|
||||
func SetOutput(w io.Writer) {
|
||||
logging.mu.Lock()
|
||||
defer logging.mu.Unlock()
|
||||
for s := severity.FatalLog; s >= severity.InfoLog; s-- {
|
||||
rb := &redirectBuffer{
|
||||
w: w,
|
||||
}
|
||||
logging.file[s] = rb
|
||||
logging.file[s] = w
|
||||
}
|
||||
}
|
||||
|
||||
@@ -838,10 +841,7 @@ func SetOutputBySeverity(name string, w io.Writer) {
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name))
|
||||
}
|
||||
rb := &redirectBuffer{
|
||||
w: w,
|
||||
}
|
||||
logging.file[sev] = rb
|
||||
logging.file[sev] = w
|
||||
}

// LogToStderr sets whether to log exclusively to stderr, bypassing outputs
@@ -873,6 +873,9 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu
if logger.writeKlogBuffer != nil {
logger.writeKlogBuffer(data)
} else {
if len(data) > 0 && data[len(data)-1] == '\n' {
data = data[:len(data)-1]
}
// TODO: set 'severity' and caller information as structured log info
// keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line}
if s == severity.ErrorLog {
@@ -897,7 +900,7 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu
l.exit(err)
}
}
l.file[severity.InfoLog].Write(data)
_, _ = l.file[severity.InfoLog].Write(data)
} else {
if l.file[s] == nil {
if err := l.createFiles(s); err != nil {
@@ -907,20 +910,20 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu
}

if l.oneOutput {
l.file[s].Write(data)
_, _ = l.file[s].Write(data)
} else {
switch s {
case severity.FatalLog:
l.file[severity.FatalLog].Write(data)
_, _ = l.file[severity.FatalLog].Write(data)
fallthrough
case severity.ErrorLog:
l.file[severity.ErrorLog].Write(data)
_, _ = l.file[severity.ErrorLog].Write(data)
fallthrough
case severity.WarningLog:
l.file[severity.WarningLog].Write(data)
_, _ = l.file[severity.WarningLog].Write(data)
fallthrough
case severity.InfoLog:
l.file[severity.InfoLog].Write(data)
_, _ = l.file[severity.InfoLog].Write(data)
}
}
}
@@ -946,7 +949,7 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu
logExitFunc = func(error) {} // If we get a write error, we'll still exit below.
for log := severity.FatalLog; log >= severity.InfoLog; log-- {
if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.
f.Write(trace)
_, _ = f.Write(trace)
}
}
l.mu.Unlock()
@@ -978,7 +981,8 @@ func (l *loggingT) exit(err error) {
logExitFunc(err)
return
}
l.flushAll()
needToSync := l.flushAll()
l.syncAll(needToSync)
OsExit(2)
}

@@ -995,10 +999,6 @@ type syncBuffer struct {
maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up.
}

func (sb *syncBuffer) Sync() error {
return sb.file.Sync()
}

// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options.
func CalculateMaxSize() uint64 {
if logging.logFile != "" {
@@ -1102,7 +1102,7 @@ const flushInterval = 5 * time.Second
// flushDaemon periodically flushes the log file buffers.
type flushDaemon struct {
mu sync.Mutex
clock clock.WithTicker
clock clock.Clock
flush func()
stopC chan struct{}
stopDone chan struct{}
@@ -1110,7 +1110,7 @@ type flushDaemon struct {

// newFlushDaemon returns a new flushDaemon. If the passed clock is nil, a
// clock.RealClock is used.
func newFlushDaemon(flush func(), tickClock clock.WithTicker) *flushDaemon {
func newFlushDaemon(flush func(), tickClock clock.Clock) *flushDaemon {
if tickClock == nil {
tickClock = clock.RealClock{}
}
@@ -1190,24 +1190,45 @@ func StartFlushDaemon(interval time.Duration) {
// lockAndFlushAll is like flushAll but locks l.mu first.
func (l *loggingT) lockAndFlushAll() {
l.mu.Lock()
l.flushAll()
needToSync := l.flushAll()
l.mu.Unlock()
// Some environments are slow when syncing and holding the lock might cause contention.
l.syncAll(needToSync)
}

// flushAll flushes all the logs and attempts to "sync" their data to disk.
// flushAll flushes all the logs
// l.mu is held.
func (l *loggingT) flushAll() {
//
// The result is the number of files which need to be synced and the pointers to them.
func (l *loggingT) flushAll() fileArray {
var needToSync fileArray

// Flush from fatal down, in case there's trouble flushing.
for s := severity.FatalLog; s >= severity.InfoLog; s-- {
file := l.file[s]
if file != nil {
file.Flush() // ignore error
file.Sync() // ignore error
if sb, ok := file.(*syncBuffer); ok && sb.file != nil {
_ = sb.Flush() // ignore error
needToSync.files[needToSync.num] = sb.file
needToSync.num++
}
}
if logging.loggerOptions.flush != nil {
logging.loggerOptions.flush()
}
return needToSync
}

type fileArray struct {
num int
files [severity.NumSeverity]*os.File
}

// syncAll attempts to "sync" their data to disk.
func (l *loggingT) syncAll(needToSync fileArray) {
// Flush from fatal down, in case there's trouble flushing.
for i := 0; i < needToSync.num; i++ {
_ = needToSync.files[i].Sync() // ignore error
}
}
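
The refactor above splits the old flushAll, which both flushed and fsynced while holding l.mu, into flushAll (drains buffers under the lock and records which *os.File handles still need a sync) and syncAll (performs the slow Sync calls after the lock is released, as noted in lockAndFlushAll). A self-contained sketch of that flush-under-lock / sync-outside-lock pattern; the logSet type and its fields are hypothetical and only illustrate the idea:

package main

import (
	"bufio"
	"os"
	"sync"
)

// logSet is a hypothetical stand-in for loggingT, reduced to the parts
// needed to show the pattern introduced by this hunk.
type logSet struct {
	mu      sync.Mutex
	writers []*bufio.Writer
	files   []*os.File
}

// flushAll drains the in-memory buffers; the caller must hold s.mu.
// It returns the files that still need an fsync.
func (s *logSet) flushAll() []*os.File {
	needToSync := make([]*os.File, 0, len(s.files))
	for i, w := range s.writers {
		_ = w.Flush() // ignore error, as klog does
		needToSync = append(needToSync, s.files[i])
	}
	return needToSync
}

// lockAndFlushAll mirrors loggingT.lockAndFlushAll: the cheap Flush happens
// under the mutex, the potentially slow Sync happens after it is released.
func (s *logSet) lockAndFlushAll() {
	s.mu.Lock()
	needToSync := s.flushAll()
	s.mu.Unlock()
	for _, f := range needToSync {
		_ = f.Sync() // ignore error, as klog does
	}
}

func main() {
	f, err := os.CreateTemp("", "klog-flush-example-*.log")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	s := &logSet{
		writers: []*bufio.Writer{bufio.NewWriter(f)},
		files:   []*os.File{f},
	}
	s.writers[0].WriteString("buffered line\n")
	s.lockAndFlushAll()
}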

// CopyStandardLogTo arranges for messages written to the Go "log" package's
@@ -1228,6 +1249,19 @@ func CopyStandardLogTo(name string) {
stdLog.SetOutput(logBridge(sev))
}

// NewStandardLogger returns a Logger that writes to the klog logs for the
// named and lower severities.
//
// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not
// recognized, NewStandardLogger panics.
func NewStandardLogger(name string) *stdLog.Logger {
sev, ok := severity.ByName(name)
if !ok {
panic(fmt.Sprintf("klog.NewStandardLogger(%q): unknown severity", name))
}
return stdLog.New(logBridge(sev), "", stdLog.Lshortfile)
}
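
NewStandardLogger (added in this hunk) and CopyStandardLogTo are the two bridges from the standard library's log package into klog. A brief usage sketch, not part of the diff, assuming only the functions shown here plus klog.InitFlags and klog.Flush:

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	// Messages written through the returned *log.Logger land in klog's
	// INFO output and, per its documentation, the lower-severity outputs.
	stdLogger := klog.NewStandardLogger("INFO")
	stdLogger.Println("written via the standard log API")

	// Alternatively, redirect the global "log" package into klog.
	klog.CopyStandardLogTo("INFO")
}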

// logBridge provides the Write method that enables CopyStandardLogTo to connect
// Go's standard logs to the logs provided by this package.
type logBridge severity.Severity
@@ -1268,9 +1302,7 @@ func (l *loggingT) setV(pc uintptr) Level {
fn := runtime.FuncForPC(pc)
file, _ := fn.FileLine(pc)
// The file is something like /a/b/c/d.go. We want just the d.
if strings.HasSuffix(file, ".go") {
file = file[:len(file)-3]
}
file = strings.TrimSuffix(file, ".go")
if slash := strings.LastIndex(file, "/"); slash >= 0 {
file = file[slash+1:]
}

4
vendor/k8s.io/klog/v2/klog_file.go
generated
vendored
@@ -109,8 +109,8 @@ func create(tag string, t time.Time, startup bool) (f *os.File, filename string,
f, err := openOrCreate(fname, startup)
if err == nil {
symlink := filepath.Join(dir, link)
os.Remove(symlink) // ignore err
os.Symlink(name, symlink) // ignore err
_ = os.Remove(symlink) // ignore err
_ = os.Symlink(name, symlink) // ignore err
return f, fname, nil
}
lastErr = err

46
vendor/k8s.io/klog/v2/klogr.go
generated
vendored
@@ -22,6 +22,11 @@ import (
"k8s.io/klog/v2/internal/serialize"
)

const (
// nameKey is used to log the `WithName` values as an additional attribute.
nameKey = "logger"
)

// NewKlogr returns a logger that is functionally identical to
// klogr.NewWithOptions(klogr.FormatKlog), i.e. it passes through to klog. The
// difference is that it uses a simpler implementation.
@@ -32,10 +37,15 @@ func NewKlogr() Logger {
// klogger is a subset of klogr/klogr.go. It had to be copied to break an
// import cycle (klogr wants to use klog, and klog wants to use klogr).
type klogger struct {
level int
callDepth int
prefix string
values []interface{}

// hasPrefix is true if the first entry in values is the special
// nameKey key/value. Such an entry gets added and later updated in
// WithName.
hasPrefix bool

values []interface{}
groups string
}

func (l *klogger) Init(info logr.RuntimeInfo) {
@@ -44,34 +54,40 @@ func (l *klogger) Init(info logr.RuntimeInfo) {

func (l *klogger) Info(level int, msg string, kvList ...interface{}) {
merged := serialize.MergeKVs(l.values, kvList)
if l.prefix != "" {
msg = l.prefix + ": " + msg
}
// Skip this function.
VDepth(l.callDepth+1, Level(level)).InfoSDepth(l.callDepth+1, msg, merged...)
}

func (l *klogger) Enabled(level int) bool {
// Skip this function and logr.Logger.Info where Enabled is called.
return VDepth(l.callDepth+2, Level(level)).Enabled()
return VDepth(l.callDepth+1, Level(level)).Enabled()
}

func (l *klogger) Error(err error, msg string, kvList ...interface{}) {
merged := serialize.MergeKVs(l.values, kvList)
if l.prefix != "" {
msg = l.prefix + ": " + msg
}
ErrorSDepth(l.callDepth+1, err, msg, merged...)
}

// WithName returns a new logr.Logger with the specified name appended. klogr
// uses '/' characters to separate name elements. Callers should not pass '/'
// uses '.' characters to separate name elements. Callers should not pass '.'
// in the provided name string, but this library does not actually enforce that.
func (l klogger) WithName(name string) logr.LogSink {
if len(l.prefix) > 0 {
l.prefix = l.prefix + "/"
if l.hasPrefix {
// Copy slice and modify value. No length checks and type
// assertions are needed because hasPrefix is only true if the
// first two elements exist and are key/value strings.
v := make([]interface{}, 0, len(l.values))
v = append(v, l.values...)
prefix, _ := v[1].(string)
v[1] = prefix + "." + name
l.values = v
} else {
// Preprend new key/value pair.
v := make([]interface{}, 0, 2+len(l.values))
v = append(v, nameKey, name)
v = append(v, l.values...)
l.values = v
l.hasPrefix = true
}
l.prefix += name
return &l
}
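
With the changes above, WithName no longer accumulates a '/'-joined prefix in the message text; the name elements are joined with '.' and emitted under the nameKey ("logger") key/value instead. A short sketch, not part of the diff, of what a caller sees, assuming NewKlogr from this file and the standard logr.Logger API:

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	logger := klog.NewKlogr().WithName("controller").WithName("sync")
	// With the nameKey change the output carries logger="controller.sync"
	// as a key/value pair instead of a "controller/sync:" message prefix.
	logger.Info("reconciling", "object", "default/test")
}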

96
vendor/k8s.io/klog/v2/klogr_slog.go
generated
vendored
Normal file
@@ -0,0 +1,96 @@
//go:build go1.21
// +build go1.21

/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package klog

import (
"context"
"log/slog"
"strconv"
"time"

"github.com/go-logr/logr"

"k8s.io/klog/v2/internal/buffer"
"k8s.io/klog/v2/internal/serialize"
"k8s.io/klog/v2/internal/severity"
"k8s.io/klog/v2/internal/sloghandler"
)

func (l *klogger) Handle(ctx context.Context, record slog.Record) error {
if logging.logger != nil {
if slogSink, ok := logging.logger.GetSink().(logr.SlogSink); ok {
// Let that logger do the work.
return slogSink.Handle(ctx, record)
}
}

return sloghandler.Handle(ctx, record, l.groups, slogOutput)
}

// slogOutput corresponds to several different functions in klog.go.
// It goes through some of the same checks and formatting steps before
// it ultimately converges by calling logging.printWithInfos.
func slogOutput(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{}) {
// See infoS.
if logging.logger != nil {
// Taking this path happens when klog has a logger installed
// as backend which doesn't support slog. Not good, we have to
// guess about the call depth and drop the actual location.
logger := logging.logger.WithCallDepth(2)
if s > severity.ErrorLog {
logger.Error(err, msg, kvList...)
} else {
logger.Info(msg, kvList...)
}
return
}

// See printS.
b := buffer.GetBuffer()
b.WriteString(strconv.Quote(msg))
if err != nil {
serialize.KVListFormat(&b.Buffer, "err", err)
}
serialize.KVListFormat(&b.Buffer, kvList...)

// See print + header.
buf := logging.formatHeader(s, file, line, now)
logging.printWithInfos(buf, file, line, s, nil, nil, 0, &b.Buffer)

buffer.PutBuffer(b)
}

func (l *klogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
clone := *l
clone.values = serialize.WithValues(l.values, sloghandler.Attrs2KVList(l.groups, attrs))
return &clone
}

func (l *klogger) WithGroup(name string) logr.SlogSink {
clone := *l
if clone.groups != "" {
clone.groups += "." + name
} else {
clone.groups = name
}
return &clone
}

var _ logr.SlogSink = &klogger{}
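
This new file makes the klogger sink implement logr.SlogSink, so slog records can flow into klog without an extra formatting round-trip. A minimal sketch, not part of the diff, assuming the ToSlogHandler helper from go-logr/logr v1.4 and klog.Background(); it requires Go 1.21 for log/slog:

package main

import (
	"flag"
	"log/slog"

	"github.com/go-logr/logr"
	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	// klog.Background() returns a logr.Logger whose sink is the klogger
	// defined above; because that sink implements logr.SlogSink, slog
	// records are handed to Handle() instead of being re-formatted.
	slogger := slog.New(logr.ToSlogHandler(klog.Background()))
	slogger.Info("hello from slog", "component", "example")
}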

34
vendor/k8s.io/klog/v2/safeptr.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
//go:build go1.18
// +build go1.18

/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package klog

// SafePtr is a function that takes a pointer of any type (T) as an argument.
// If the provided pointer is not nil, it returns the same pointer. If it is nil, it returns nil instead.
//
// This function is particularly useful to prevent nil pointer dereferencing when:
//
// - The type implements interfaces that are called by the logger, such as `fmt.Stringer`.
// - And these interface implementations do not perform nil checks themselves.
func SafePtr[T any](p *T) any {
if p == nil {
return nil
}
return p
}
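
SafePtr, also new in this release, converts a typed-nil pointer into an untyped nil before it reaches the log serializer, which avoids panics from Stringer implementations that do not guard against a nil receiver (as its doc comment above explains). A small usage sketch, not part of the diff; the name type here is hypothetical:

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

// name implements fmt.Stringer but does not check for a nil receiver.
type name struct{ first, last string }

func (n *name) String() string { return n.first + " " + n.last }

func main() {
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	var n *name // typed nil
	// Logging n directly would let the serializer call n.String() on a nil
	// receiver; SafePtr turns the typed nil into an untyped nil first.
	klog.InfoS("greeting", "who", klog.SafePtr(n))
}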

35
vendor/modules.txt
vendored
@@ -22,10 +22,10 @@ code.cloudfoundry.org/clock/fakeclock
# dario.cat/mergo v1.0.1
## explicit; go 1.13
dario.cat/mergo
# github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24
# github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6
## explicit; go 1.20
github.com/AdaLogics/go-fuzz-headers
# github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0
# github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2
## explicit; go 1.18
github.com/AdamKorcz/go-118-fuzz-build/testing
# github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c
@@ -48,8 +48,8 @@ github.com/Microsoft/go-winio/pkg/etwlogrus
github.com/Microsoft/go-winio/pkg/fs
github.com/Microsoft/go-winio/pkg/guid
github.com/Microsoft/go-winio/vhd
# github.com/Microsoft/hcsshim v0.12.8
## explicit; go 1.21
# github.com/Microsoft/hcsshim v0.12.9
## explicit; go 1.22
github.com/Microsoft/hcsshim
github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options
github.com/Microsoft/hcsshim/computestorage
@@ -361,13 +361,18 @@ github.com/containerd/continuity/devices
github.com/containerd/continuity/driver
github.com/containerd/continuity/fs
github.com/containerd/continuity/sysx
# github.com/containerd/errdefs v0.3.0
# github.com/containerd/errdefs v1.0.0
## explicit; go 1.20
github.com/containerd/errdefs
# github.com/containerd/errdefs/pkg v0.3.0
## explicit; go 1.22
github.com/containerd/errdefs/pkg/errgrpc
github.com/containerd/errdefs/pkg/internal/cause
github.com/containerd/errdefs/pkg/internal/types
# github.com/containerd/fifo v1.1.0
## explicit; go 1.18
github.com/containerd/fifo
# github.com/containerd/go-cni v1.1.10
# github.com/containerd/go-cni v1.1.11
## explicit; go 1.21
github.com/containerd/go-cni
# github.com/containerd/go-runc v1.1.0
@@ -389,13 +394,13 @@ github.com/containerd/platforms
## explicit; go 1.19
github.com/containerd/stargz-snapshotter/estargz
github.com/containerd/stargz-snapshotter/estargz/errorutil
# github.com/containerd/ttrpc v1.2.5
# github.com/containerd/ttrpc v1.2.7
## explicit; go 1.19
github.com/containerd/ttrpc
# github.com/containerd/typeurl/v2 v2.2.3
## explicit; go 1.21
github.com/containerd/typeurl/v2
# github.com/containernetworking/cni v1.2.2
# github.com/containernetworking/cni v1.2.3
## explicit; go 1.21
github.com/containernetworking/cni/libcni
github.com/containernetworking/cni/pkg/invoke
@@ -483,8 +488,8 @@ github.com/fernet/fernet-go
# github.com/fluent/fluent-logger-golang v1.9.0
## explicit
github.com/fluent/fluent-logger-golang/fluent
# github.com/fsnotify/fsnotify v1.6.0
## explicit; go 1.16
# github.com/fsnotify/fsnotify v1.7.0
## explicit; go 1.17
github.com/fsnotify/fsnotify
# github.com/go-logr/logr v1.4.2
## explicit; go 1.18
@@ -1087,9 +1092,10 @@ github.com/spf13/cobra
# github.com/spf13/pflag v1.0.5
## explicit; go 1.12
github.com/spf13/pflag
# github.com/stretchr/testify v1.9.0
# github.com/stretchr/testify v1.10.0
## explicit; go 1.17
github.com/stretchr/testify/assert
github.com/stretchr/testify/assert/yaml
github.com/stretchr/testify/require
github.com/stretchr/testify/suite
# github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
@@ -1121,7 +1127,7 @@ github.com/tonistiigi/units
# github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab
## explicit; go 1.12
github.com/tonistiigi/vt100
# github.com/vbatts/tar-split v0.11.5
# github.com/vbatts/tar-split v0.11.6
## explicit; go 1.17
github.com/vbatts/tar-split/archive/tar
github.com/vbatts/tar-split/tar/asm
@@ -1597,14 +1603,15 @@ gotest.tools/v3/internal/format
gotest.tools/v3/internal/source
gotest.tools/v3/poll
gotest.tools/v3/skip
# k8s.io/klog/v2 v2.90.1
## explicit; go 1.13
# k8s.io/klog/v2 v2.130.1
## explicit; go 1.18
k8s.io/klog/v2
k8s.io/klog/v2/internal/buffer
k8s.io/klog/v2/internal/clock
k8s.io/klog/v2/internal/dbg
k8s.io/klog/v2/internal/serialize
k8s.io/klog/v2/internal/severity
k8s.io/klog/v2/internal/sloghandler
# resenje.org/singleflight v0.4.3
## explicit; go 1.18
resenje.org/singleflight