Mirror of https://github.com/moby/moby.git (synced 2026-01-12 11:11:44 +00:00)

Compare commits (140 commits)
Commits in this comparison (SHA1):

7de3a1f2ac, 60eece38cd, 9dc7e0b2ae, 54ac8bbe37, f8383fa45e, 6e1af3d5d8, 0eae0850ac, e6a2c9bebb, 5ff6cef316, 4b98bfd07d,
bd777a5806, ae548176dc, 0dd255c6f7, 122682205f, 9f102b3b5b, 6aa6d461da, 58af0513c0, 75891766e4, 3ec9003a14, bffbf551fc,
caef5cc70c, 5a91b941b8, 34471d3259, 782843c0d1, bec84c9c31, 2166ac395a, d0315c9824, ff546aff14, 15db81eeaa, 23af4b75e9,
da8bfd963e, 0ce4415ff2, 14a48ac308, c50e7e6ca2, 2a4ea4749d, b536253047, 3216abd8db, dd5a6fdbac, 0c5e131330, b50a85d0ed,
8105391708, 6209d5bd68, 25cffb9dec, 21279f652e, a27066d1ca, e88d4ea298, 613d955d38, e962b3e06e, 33dbea3c37, 5e46424b29,
5ca50f5c24, a599caf7e9, 89903672a7, dbf6db9306, 122e5e1442, 55a4cadaa5, 042dad56d0, 553d915ef4, c70f626351, 5966382473,
3edc25412a, 65906e44b0, a298720e8f, 88a3e540c9, 90fc11f69a, 182df40d13, 2544c68655, be77069539, 0299ca1d73, aff4659c67,
c47231e5cf, 962f331e76, 71f9bfe47f, 017213c2b0, 210f03082b, 2f78133a0a, 675593bb4f, 9c291b1745, a23ff1bb1a, c78cecd77f,
f95f4c7d22, 508e20b4a0, f14cf10618, 0cd951e4dd, b08a51fe16, d151b0f87f, c6ba9a5124, 4673a3ca2c, 30f8908102, 7454d6a2e6,
65cc597cea, b722836927, e8ecb9c76d, e6cae1f237, 8ec448db6b, 274310807e, 886e726984, a0f0f7e77e, 91903e81ca, ccfe0a41d4,
ed11c9c562, d046451b34, e16a25e442, b1aac1b134, fffbe84ded, c55eeb3cfa, 9f6600deed, f26fd4a73a, 70fe516b46, 303e26dce7,
f7ce828e9e, 085fa9bf66, 577ca9b076, 98ddccbbfe, 03ecc6f5e6, 637205391b, d16d8bd448, 2ebb5ca1c0, 3d56d734db, 0a2f5085ee,
3141ea5c8b, 4f25076181, d93cc7edc0, 5beae56515, ee5909c2d0, f37d6f5f48, fd828b6766, 7ac688aa0f, 584a30c772, 60605eb1da,
71b8e0339c, 08e8912d7c, aee8b332bf, 12c4e03288, e2e670299f, f42f65b464, 935787c19c, bd19301d9e, 79c31c12fc, 50bd133ad3
.github/PULL_REQUEST_TEMPLATE.md (vendored, 5 changed lines)

@@ -22,9 +22,12 @@ Please provide the following information:
 **- Description for the changelog**
 <!--
 Write a short (one line) summary that describes the changes in this
-pull request for inclusion in the changelog:
+pull request for inclusion in the changelog.
+It must be placed inside the below triple backticks section:
 -->
+```markdown changelog
+
+```

 **- A picture of a cute animal (not mandatory but encouraged)**

.github/workflows/.dco.yml (vendored, 10 changed lines)

@@ -3,6 +3,15 @@ name: .dco

 # TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025

+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 on:
   workflow_call:

@@ -12,6 +21,7 @@ env:
 jobs:
   run:
     runs-on: ubuntu-20.04
+    timeout-minutes: 120 # guardrails timeout for the whole job
     steps:
       -
        name: Checkout

.github/workflows/.test-prepare.yml (vendored, 10 changed lines)

@@ -3,6 +3,15 @@ name: .test-prepare

 # TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025

+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 on:
   workflow_call:
     outputs:

@@ -13,6 +22,7 @@ on:
 jobs:
   run:
     runs-on: ubuntu-20.04
+    timeout-minutes: 120 # guardrails timeout for the whole job
    outputs:
      matrix: ${{ steps.set.outputs.matrix }}
    steps:

.github/workflows/.test.yml (vendored, 32 changed lines)

@@ -3,6 +3,15 @@ name: .test

 # TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025

+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 on:
   workflow_call:
     inputs:

@@ -12,7 +21,7 @@ on:
       default: "graphdriver"

 env:
-  GO_VERSION: "1.21.8"
+  GO_VERSION: "1.22.8"
   GOTESTLIST_VERSION: v0.3.1
   TESTSTAT_VERSION: v0.1.25
   ITG_CLI_MATRIX_SIZE: 6

@@ -23,8 +32,8 @@ env:
 jobs:
   unit:
     runs-on: ubuntu-20.04
+    timeout-minutes: 120 # guardrails timeout for the whole job
     continue-on-error: ${{ github.event_name != 'pull_request' }}
-    timeout-minutes: 120
     steps:
       -
         name: Checkout

@@ -70,11 +79,12 @@ jobs:
         with:
           name: test-reports-unit-${{ inputs.storage }}
           path: /tmp/reports/*
+          retention-days: 1

   unit-report:
     runs-on: ubuntu-20.04
-    continue-on-error: ${{ github.event_name != 'pull_request' }}
     timeout-minutes: 10
+    continue-on-error: ${{ github.event_name != 'pull_request' }}
     if: always()
     needs:
       - unit

@@ -101,8 +111,8 @@ jobs:
   docker-py:
     runs-on: ubuntu-20.04
+    timeout-minutes: 120 # guardrails timeout for the whole job
     continue-on-error: ${{ github.event_name != 'pull_request' }}
-    timeout-minutes: 120
     steps:
       -
         name: Checkout

@@ -150,11 +160,12 @@ jobs:
         with:
           name: test-reports-docker-py-${{ inputs.storage }}
           path: /tmp/reports/*
+          retention-days: 1

   integration-flaky:
     runs-on: ubuntu-20.04
+    timeout-minutes: 120 # guardrails timeout for the whole job
     continue-on-error: ${{ github.event_name != 'pull_request' }}
-    timeout-minutes: 120
     steps:
       -
         name: Checkout

@@ -181,8 +192,8 @@ jobs:
   integration:
     runs-on: ${{ matrix.os }}
+    timeout-minutes: 120 # guardrails timeout for the whole job
     continue-on-error: ${{ github.event_name != 'pull_request' }}
-    timeout-minutes: 120
     strategy:
       fail-fast: false
       matrix:

@@ -271,11 +282,12 @@ jobs:
         with:
           name: test-reports-integration-${{ inputs.storage }}-${{ env.TESTREPORTS_NAME }}
           path: /tmp/reports/*
+          retention-days: 1

   integration-report:
     runs-on: ubuntu-20.04
-    continue-on-error: ${{ github.event_name != 'pull_request' }}
     timeout-minutes: 10
+    continue-on-error: ${{ github.event_name != 'pull_request' }}
     if: always()
     needs:
       - integration

@@ -303,6 +315,7 @@ jobs:
   integration-cli-prepare:
     runs-on: ubuntu-20.04
+    timeout-minutes: 120 # guardrails timeout for the whole job
     continue-on-error: ${{ github.event_name != 'pull_request' }}
     outputs:
       matrix: ${{ steps.tests.outputs.matrix }}

@@ -338,8 +351,8 @@ jobs:
   integration-cli:
     runs-on: ubuntu-20.04
+    timeout-minutes: 120 # guardrails timeout for the whole job
     continue-on-error: ${{ github.event_name != 'pull_request' }}
-    timeout-minutes: 120
     needs:
       - integration-cli-prepare
     strategy:

@@ -410,11 +423,12 @@ jobs:
         with:
           name: test-reports-integration-cli-${{ inputs.storage }}-${{ env.TESTREPORTS_NAME }}
           path: /tmp/reports/*
+          retention-days: 1

   integration-cli-report:
     runs-on: ubuntu-20.04
-    continue-on-error: ${{ github.event_name != 'pull_request' }}
     timeout-minutes: 10
+    continue-on-error: ${{ github.event_name != 'pull_request' }}
     if: always()
     needs:
       - integration-cli

.github/workflows/.windows.yml (vendored, 21 changed lines)

@@ -3,6 +3,15 @@ name: .windows

 # TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025

+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 on:
   workflow_call:
     inputs:

@@ -19,7 +28,7 @@ on:
       default: false

 env:
-  GO_VERSION: "1.21.8"
+  GO_VERSION: "1.22.8"
   GOTESTLIST_VERSION: v0.3.1
   TESTSTAT_VERSION: v0.1.25
   WINDOWS_BASE_IMAGE: mcr.microsoft.com/windows/servercore

@@ -33,6 +42,7 @@ env:
 jobs:
   build:
     runs-on: ${{ inputs.os }}
+    timeout-minutes: 120 # guardrails timeout for the whole job
     env:
       GOPATH: ${{ github.workspace }}\go
       GOBIN: ${{ github.workspace }}\go\bin

@@ -112,7 +122,7 @@ jobs:
   unit-test:
     runs-on: ${{ inputs.os }}
-    timeout-minutes: 120
+    timeout-minutes: 120 # guardrails timeout for the whole job
     env:
       GOPATH: ${{ github.workspace }}\go
       GOBIN: ${{ github.workspace }}\go\bin

@@ -190,9 +200,11 @@ jobs:
         with:
           name: ${{ inputs.os }}-${{ inputs.storage }}-unit-reports
           path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*
+          retention-days: 1

   unit-test-report:
     runs-on: ubuntu-latest
+    timeout-minutes: 120 # guardrails timeout for the whole job
     if: always()
     needs:
       - unit-test

@@ -219,6 +231,7 @@ jobs:
   integration-test-prepare:
     runs-on: ubuntu-latest
+    timeout-minutes: 120 # guardrails timeout for the whole job
     outputs:
       matrix: ${{ steps.tests.outputs.matrix }}
     steps:

@@ -252,8 +265,8 @@ jobs:
   integration-test:
     runs-on: ${{ inputs.os }}
+    timeout-minutes: 120 # guardrails timeout for the whole job
     continue-on-error: ${{ inputs.storage == 'snapshotter' && github.event_name != 'pull_request' }}
-    timeout-minutes: 120
     needs:
       - build
       - integration-test-prepare

@@ -508,9 +521,11 @@ jobs:
         with:
           name: ${{ inputs.os }}-${{ inputs.storage }}-integration-reports-${{ matrix.runtime }}-${{ env.TESTREPORTS_NAME }}
          path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*
+          retention-days: 1

   integration-test-report:
     runs-on: ubuntu-latest
+    timeout-minutes: 120 # guardrails timeout for the whole job
     continue-on-error: ${{ inputs.storage == 'snapshotter' && github.event_name != 'pull_request' }}
     if: always()
     needs:

.github/workflows/bin-image.yml (vendored, 12 changed lines)

@@ -1,5 +1,14 @@
 name: bin-image

+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

@@ -29,6 +38,7 @@ jobs:
   prepare:
     runs-on: ubuntu-20.04
+    timeout-minutes: 20 # guardrails timeout for the whole job
     outputs:
       platforms: ${{ steps.platforms.outputs.matrix }}
     steps:

@@ -81,6 +91,7 @@ jobs:
   build:
     runs-on: ubuntu-20.04
+    timeout-minutes: 120 # guardrails timeout for the whole job
     needs:
       - validate-dco
       - prepare

@@ -151,6 +162,7 @@ jobs:
   merge:
     runs-on: ubuntu-20.04
+    timeout-minutes: 120 # guardrails timeout for the whole job
     needs:
       - build
     if: always() && !contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled') && github.event_name != 'pull_request' && github.repository == 'moby/moby'

.github/workflows/buildkit.yml (vendored, 20 changed lines)

@@ -1,5 +1,14 @@
 name: buildkit

+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

@@ -13,7 +22,8 @@ on:
   pull_request:

 env:
-  GO_VERSION: "1.21.8"
+  GO_VERSION: "1.22.8"
+  ALPINE_VERSION: "3.19"
   DESTDIR: ./build

 jobs:

@@ -22,6 +32,7 @@ jobs:
   build:
     runs-on: ubuntu-20.04
+    timeout-minutes: 120 # guardrails timeout for the whole job
     needs:
       - validate-dco
     steps:

@@ -47,7 +58,7 @@ jobs:
   test:
     runs-on: ubuntu-20.04
-    timeout-minutes: 120
+    timeout-minutes: 120 # guardrails timeout for the whole job
     needs:
       - build
     strategy:

@@ -84,6 +95,11 @@ jobs:
         uses: actions/checkout@v4
         with:
          path: moby
+      -
+        name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: ${{ env.GO_VERSION }}
      -
        name: BuildKit ref
        run: |

.github/workflows/ci.yml (vendored, 28 changed lines)

@@ -1,5 +1,14 @@
 name: ci

+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

@@ -21,6 +30,7 @@ jobs:
   build:
     runs-on: ubuntu-20.04
+    timeout-minutes: 120 # guardrails timeout for the whole job
     needs:
       - validate-dco
     strategy:

@@ -51,17 +61,10 @@ jobs:
         name: Check artifacts
         run: |
           find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
-      -
-        name: Upload artifacts
-        uses: actions/upload-artifact@v4
-        with:
-          name: ${{ matrix.target }}
-          path: ${{ env.DESTDIR }}
-          if-no-files-found: error
-          retention-days: 7

   prepare-cross:
     runs-on: ubuntu-latest
+    timeout-minutes: 20 # guardrails timeout for the whole job
     needs:
       - validate-dco
     outputs:

@@ -83,6 +86,7 @@ jobs:
   cross:
     runs-on: ubuntu-20.04
+    timeout-minutes: 20 # guardrails timeout for the whole job
     needs:
       - validate-dco
       - prepare-cross

@@ -119,11 +123,3 @@ jobs:
         name: Check artifacts
         run: |
           find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
-      -
-        name: Upload artifacts
-        uses: actions/upload-artifact@v4
-        with:
-          name: cross-${{ env.PLATFORM_PAIR }}
-          path: ${{ env.DESTDIR }}
-          if-no-files-found: error
-          retention-days: 7

.github/workflows/test.yml (vendored, 19 changed lines)

@@ -1,5 +1,14 @@
 name: test

+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

@@ -13,7 +22,9 @@ on:
   pull_request:

 env:
-  GO_VERSION: "1.21.8"
+  GO_VERSION: "1.22.8"
+  GIT_PAGER: "cat"
+  PAGER: "cat"

 jobs:
   validate-dco:

@@ -21,6 +32,7 @@ jobs:
   build-dev:
     runs-on: ubuntu-20.04
+    timeout-minutes: 120 # guardrails timeout for the whole job
     needs:
       - validate-dco
     strategy:

@@ -69,6 +81,7 @@ jobs:
   validate-prepare:
     runs-on: ubuntu-20.04
+    timeout-minutes: 120 # guardrails timeout for the whole job
     needs:
       - validate-dco
     outputs:

@@ -90,7 +103,7 @@ jobs:
   validate:
     runs-on: ubuntu-20.04
-    timeout-minutes: 120
+    timeout-minutes: 120 # guardrails timeout for the whole job
     needs:
       - validate-prepare
       - build-dev

@@ -124,6 +137,7 @@ jobs:
   smoke-prepare:
     runs-on: ubuntu-20.04
+    timeout-minutes: 120 # guardrails timeout for the whole job
     needs:
       - validate-dco
     outputs:

@@ -145,6 +159,7 @@ jobs:
   smoke:
     runs-on: ubuntu-20.04
+    timeout-minutes: 120 # guardrails timeout for the whole job
     needs:
       - smoke-prepare
     strategy:

.github/workflows/validate-pr.yml (vendored, new file, 74 lines)

@@ -0,0 +1,74 @@
name: validate-pr

# Default to 'contents: read', which grants actions to read commits.
#
# If any permission is set, any permission not included in the list is
# implicitly set to "none".
#
# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
permissions:
  contents: read

on:
  pull_request:
    types: [opened, edited, labeled, unlabeled]

jobs:
  check-area-label:
    runs-on: ubuntu-20.04
    timeout-minutes: 120 # guardrails timeout for the whole job
    steps:
      - name: Missing `area/` label
        if: contains(join(github.event.pull_request.labels.*.name, ','), 'impact/') && !contains(join(github.event.pull_request.labels.*.name, ','), 'area/')
        run: |
          echo "::error::Every PR with an 'impact/*' label should also have an 'area/*' label"
          exit 1
      - name: OK
        run: exit 0

  check-changelog:
    if: contains(join(github.event.pull_request.labels.*.name, ','), 'impact/')
    runs-on: ubuntu-20.04
    timeout-minutes: 120 # guardrails timeout for the whole job
    env:
      PR_BODY: |
        ${{ github.event.pull_request.body }}
    steps:
      - name: Check changelog description
        run: |
          # Extract the `markdown changelog` note code block
          block=$(echo -n "$PR_BODY" | tr -d '\r' | awk '/^```markdown changelog$/{flag=1;next}/^```$/{flag=0}flag')

          # Strip empty lines
          desc=$(echo "$block" | awk NF)

          if [ -z "$desc" ]; then
            echo "::error::Changelog section is empty. Please provide a description for the changelog."
            exit 1
          fi

          len=$(echo -n "$desc" | wc -c)
          if [[ $len -le 6 ]]; then
            echo "::error::Description looks too short: $desc"
            exit 1
          fi

          echo "This PR will be included in the release notes with the following note:"
          echo "$desc"

  check-pr-branch:
    runs-on: ubuntu-20.04
    timeout-minutes: 120 # guardrails timeout for the whole job
    env:
      PR_TITLE: ${{ github.event.pull_request.title }}
    steps:
      # Backports or PR that target a release branch directly should mention the target branch in the title, for example:
      # [X.Y backport] Some change that needs backporting to X.Y
      # [X.Y] Change directly targeting the X.Y branch
      - name: Get branch from PR title
        id: title_branch
        run: echo "$PR_TITLE" | sed -n 's/^\[\([0-9]*\.[0-9]*\)[^]]*\].*/branch=\1/p' >> $GITHUB_OUTPUT

      - name: Check release branch
        if: github.event.pull_request.base.ref != steps.title_branch.outputs.branch && !(github.event.pull_request.base.ref == 'master' && steps.title_branch.outputs.branch == '')
        run: echo "::error::PR title suggests targetting the ${{ steps.title_branch.outputs.branch }} branch, but is opened against ${{ github.event.pull_request.base.ref }}" && exit 1

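For reference, a minimal Go sketch of the extraction performed by the check-changelog step above. The workflow itself does this with awk; the extractChangelog function below is only an illustrative stand-in, not code from the repository:

```go
package main

import (
	"fmt"
	"strings"
)

// extractChangelog returns the non-empty lines found between a
// "```markdown changelog" fence and the next closing "```" fence,
// mirroring the awk pipeline used by the check-changelog step.
func extractChangelog(prBody string) string {
	var out []string
	inBlock := false
	for _, line := range strings.Split(strings.ReplaceAll(prBody, "\r", ""), "\n") {
		switch {
		case line == "```markdown changelog":
			inBlock = true
		case line == "```":
			inBlock = false
		case inBlock && strings.TrimSpace(line) != "":
			out = append(out, line)
		}
	}
	return strings.Join(out, "\n")
}

func main() {
	body := "Some description\n```markdown changelog\nFix a bug in the thing\n```\n"
	desc := extractChangelog(body)
	if len(desc) <= 6 {
		fmt.Println("::error::Description looks too short:", desc)
		return
	}
	fmt.Println("changelog note:", desc)
}
```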
.github/workflows/windows-2019.yml (vendored, 9 changed lines)

@@ -1,5 +1,14 @@
 name: windows-2019

+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

.github/workflows/windows-2022.yml (vendored, 9 changed lines)

@@ -1,5 +1,14 @@
 name: windows-2022

+# Default to 'contents: read', which grants actions to read commits.
+#
+# If any permission is set, any permission not included in the list is
+# implicitly set to "none".
+#
+# see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+permissions:
+  contents: read
+
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

@@ -39,6 +39,11 @@ linters-settings:

   govet:
     check-shadowing: false

+  gosec:
+    excludes:
+      - G115 # FIXME temporarily suppress 'G115: integer overflow conversion': it produces many hits, some of which may be false positives, and need to be looked at; see https://github.com/moby/moby/issues/48358
+
   depguard:
     rules:
       main:

@@ -1,6 +1,6 @@
 # syntax=docker/dockerfile:1

-ARG GO_VERSION=1.21.8
+ARG GO_VERSION=1.22.8
 ARG BASE_DEBIAN_DISTRO="bookworm"
 ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
 ARG XX_VERSION=1.2.1

@@ -141,7 +141,7 @@ RUN git init . && git remote add origin "https://github.com/go-delve/delve.git"
 # from the https://github.com/go-delve/delve repository.
 # It can be used to run Docker with a possibility of
 # attaching debugger to it.
-ARG DELVE_VERSION=v1.21.1
+ARG DELVE_VERSION=v1.23.0
 RUN git fetch -q --depth 1 origin "${DELVE_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD

 FROM base AS delve-build

@@ -198,7 +198,7 @@ RUN git init . && git remote add origin "https://github.com/containerd/container
 # When updating the binary version you may also need to update the vendor
 # version to pick up bug fixes or new APIs, however, usually the Go packages
 # are built from a commit from the master branch.
-ARG CONTAINERD_VERSION=v1.7.13
+ARG CONTAINERD_VERSION=v1.7.22
 RUN git fetch -q --depth 1 origin "${CONTAINERD_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD

 FROM base AS containerd-build

@@ -231,7 +231,7 @@ FROM binary-dummy AS containerd-windows
 FROM containerd-${TARGETOS} AS containerd

 FROM base AS golangci_lint
-ARG GOLANGCI_LINT_VERSION=v1.55.2
+ARG GOLANGCI_LINT_VERSION=v1.60.2
 RUN --mount=type=cache,target=/root/.cache/go-build \
     --mount=type=cache,target=/go/pkg/mod \
     GOBIN=/build/ GO111MODULE=on go install "github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCI_LINT_VERSION}" \

@@ -5,7 +5,7 @@

 # This represents the bare minimum required to build and test Docker.

-ARG GO_VERSION=1.21.8
+ARG GO_VERSION=1.22.8

 ARG BASE_DEBIAN_DISTRO="bookworm"
 ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"

@@ -161,10 +161,10 @@ FROM ${WINDOWS_BASE_IMAGE}:${WINDOWS_BASE_IMAGE_TAG}
 # Use PowerShell as the default shell
 SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]

-ARG GO_VERSION=1.21.8
+ARG GO_VERSION=1.22.8
 ARG GOTESTSUM_VERSION=v1.8.2
 ARG GOWINRES_VERSION=v0.3.1
-ARG CONTAINERD_VERSION=v1.7.13
+ARG CONTAINERD_VERSION=v1.7.22

 # Environment variable notes:
 # - GO_VERSION must be consistent with 'Dockerfile' used by Linux.

Makefile (5 changed lines)

@@ -16,6 +16,9 @@ export VALIDATE_REPO
 export VALIDATE_BRANCH
 export VALIDATE_ORIGIN_BRANCH

+export PAGER
+export GIT_PAGER
+
 # env vars passed through directly to Docker's build scripts
 # to allow things like `make KEEPBUNDLE=1 binary` easily
 # `project/PACKAGERS.md` have some limited documentation of some of these

@@ -77,6 +80,8 @@ DOCKER_ENVS := \
	-e DEFAULT_PRODUCT_LICENSE \
	-e PRODUCT \
	-e PACKAGER_NAME \
+	-e PAGER \
+	-e GIT_PAGER \
	-e OTEL_EXPORTER_OTLP_ENDPOINT \
	-e OTEL_EXPORTER_OTLP_PROTOCOL \
	-e OTEL_SERVICE_NAME

@@ -815,9 +815,11 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
 		if multiplexed && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
 			contentType = types.MediaTypeMultiplexedStream
 		}
-		fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: "+contentType+"\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
+		// FIXME(thaJeztah): we should not ignore errors here; see https://github.com/moby/moby/pull/48359#discussion_r1725562802
+		fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: %v\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n", contentType)
 	} else {
-		fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
+		// FIXME(thaJeztah): we should not ignore errors here; see https://github.com/moby/moby/pull/48359#discussion_r1725562802
+		fmt.Fprint(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
 	}

 	closer := func() error {

@@ -21,7 +21,7 @@ type grpcRouter struct {
 // NewRouter initializes a new grpc http router
 func NewRouter(backends ...Backend) router.Router {
 	unary := grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unaryInterceptor(), grpcerrors.UnaryServerInterceptor))
-	stream := grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(otelgrpc.StreamServerInterceptor(), grpcerrors.StreamServerInterceptor))
+	stream := grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(otelgrpc.StreamServerInterceptor(), grpcerrors.StreamServerInterceptor)) //nolint:staticcheck // TODO(thaJeztah): ignore SA1019 for deprecated options: see https://github.com/moby/moby/issues/47437

 	r := &grpcRouter{
 		h2Server: &http2.Server{},

@@ -46,7 +46,7 @@ func (gr *grpcRouter) initRoutes() {
 }

 func unaryInterceptor() grpc.UnaryServerInterceptor {
-	withTrace := otelgrpc.UnaryServerInterceptor()
+	withTrace := otelgrpc.UnaryServerInterceptor() //nolint:staticcheck // TODO(thaJeztah): ignore SA1019 for deprecated options: see https://github.com/moby/moby/issues/47437

 	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
 		// This method is used by the clients to send their traces to buildkit so they can be included

@@ -55,7 +55,7 @@ func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrit
 	if p := r.FormValue("platform"); p != "" {
 		sp, err := platforms.Parse(p)
 		if err != nil {
-			return err
+			return errdefs.InvalidParameter(err)
 		}
 		platform = &sp
 	}

@@ -141,7 +141,7 @@ func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrit
 		id, progressErr = ir.backend.ImportImage(ctx, tagRef, platform, comment, layerReader, r.Form["changes"])

 		if progressErr == nil {
-			output.Write(streamformatter.FormatStatus("", id.String()))
+			_, _ = output.Write(streamformatter.FormatStatus("", "%v", id.String()))
 		}
 	}
 	if progressErr != nil {

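The first hunk above wraps a platform-parse failure in errdefs.InvalidParameter so the API reports a client error rather than a generic server error. A hedged, self-contained sketch of that pattern follows; parsePlatformParam is a hypothetical helper written for illustration, not a function in the codebase:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	"github.com/docker/docker/errdefs"
)

// parsePlatformParam validates user-supplied input and wraps failures as
// "invalid parameter" errors, which the API layer maps to HTTP 400.
func parsePlatformParam(p string) (string, error) {
	if p == "" {
		return "", nil
	}
	sp, err := platforms.Parse(p)
	if err != nil {
		return "", errdefs.InvalidParameter(err)
	}
	return platforms.Format(sp), nil
}

func main() {
	_, err := parsePlatformParam("not/a/valid/platform/string")
	fmt.Println(errdefs.IsInvalidParameter(err)) // true: caller can return a 400
}
```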
@@ -224,14 +224,6 @@ func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter,
 		adjustForAPIVersion(v, &service)
 	}

-	version := httputils.VersionFromContext(ctx)
-	if versions.LessThan(version, "1.44") {
-		if service.TaskTemplate.ContainerSpec != nil && service.TaskTemplate.ContainerSpec.Healthcheck != nil {
-			// StartInterval was added in API 1.44
-			service.TaskTemplate.ContainerSpec.Healthcheck.StartInterval = 0
-		}
-	}
-
 	resp, err := sr.backend.CreateService(service, encodedAuth, queryRegistry)
 	if err != nil {
 		log.G(ctx).WithFields(log.Fields{

@@ -121,11 +121,17 @@ func adjustForAPIVersion(cliVersion string, service *swarm.ServiceSpec) {
 	}

 	if versions.LessThan(cliVersion, "1.44") {
-		// seccomp, apparmor, and no_new_privs were added in 1.44.
-		if service.TaskTemplate.ContainerSpec != nil && service.TaskTemplate.ContainerSpec.Privileges != nil {
-			service.TaskTemplate.ContainerSpec.Privileges.Seccomp = nil
-			service.TaskTemplate.ContainerSpec.Privileges.AppArmor = nil
-			service.TaskTemplate.ContainerSpec.Privileges.NoNewPrivileges = false
+		if service.TaskTemplate.ContainerSpec != nil {
+			// seccomp, apparmor, and no_new_privs were added in 1.44.
+			if service.TaskTemplate.ContainerSpec.Privileges != nil {
+				service.TaskTemplate.ContainerSpec.Privileges.Seccomp = nil
+				service.TaskTemplate.ContainerSpec.Privileges.AppArmor = nil
+				service.TaskTemplate.ContainerSpec.Privileges.NoNewPrivileges = false
+			}
+			if service.TaskTemplate.ContainerSpec.Healthcheck != nil {
+				// StartInterval was added in API 1.44
+				service.TaskTemplate.ContainerSpec.Healthcheck.StartInterval = 0
+			}
 		}
 	}
 }

@@ -4938,7 +4938,7 @@ definitions:
           The version Go used to compile the daemon, and the version of the Go
           runtime in use.
         type: "string"
-        example: "go1.13.14"
+        example: "go1.22.7"
       Os:
         description: |
           The operating system that the daemon is running on ("linux" or "windows")

@@ -1,6 +1,7 @@
 package container // import "github.com/docker/docker/api/types/container"

 import (
+	"errors"
 	"fmt"
 	"strings"

@@ -325,12 +326,12 @@ func ValidateRestartPolicy(policy RestartPolicy) error {
 		if policy.MaximumRetryCount < 0 {
 			msg += " and cannot be negative"
 		}
-		return &errInvalidParameter{fmt.Errorf(msg)}
+		return &errInvalidParameter{errors.New(msg)}
 	}
 	return nil
 case RestartPolicyOnFailure:
 	if policy.MaximumRetryCount < 0 {
-		return &errInvalidParameter{fmt.Errorf("invalid restart policy: maximum retry count cannot be negative")}
+		return &errInvalidParameter{errors.New("invalid restart policy: maximum retry count cannot be negative")}
 	}
 	return nil
 case "":

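Many hunks in this comparison swap fmt.Errorf for errors.New where the message is not actually a format string. A small illustration of why that matters; the message value below is made up:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	// A message that happens to contain a percent sign, e.g. built from input.
	msg := "invalid restart policy: 100% of retries used"

	// fmt.Errorf treats msg as a format string, so the stray "%" is parsed as
	// a verb with no argument and the text gets "%!o(MISSING)"-style noise.
	// go vet's printf check also flags non-constant format strings.
	bad := fmt.Errorf(msg)

	// errors.New uses the string verbatim, which is what these call sites want.
	good := errors.New(msg)

	fmt.Println("fmt.Errorf:", bad)
	fmt.Println("errors.New:", good)
}
```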
@@ -21,6 +21,7 @@ import (
 	"github.com/docker/docker/builder/builder-next/exporter/overrides"
 	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/daemon/images"
+	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/libnetwork"
 	"github.com/docker/docker/opts"
 	"github.com/docker/docker/pkg/idtools"

@@ -326,7 +327,7 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
 		// TODO: remove once opt.Options.Platform is of type specs.Platform
 		_, err := platforms.Parse(opt.Options.Platform)
 		if err != nil {
-			return nil, err
+			return nil, errdefs.InvalidParameter(err)
 		}
 		frontendAttrs["platform"] = opt.Options.Platform
 	}

@@ -44,6 +44,10 @@ func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History,
 		return nil, errors.Wrap(err, "failed to parse image config for patch")
 	}

+	if m == nil {
+		return nil, errors.New("null image config")
+	}
+
 	var rootFS ocispec.RootFS
 	rootFS.Type = "layers"
 	rootFS.DiffIDs = append(rootFS.DiffIDs, dps...)

builder/builder-next/exporter/mobyexporter/writer_test.go (new file, 42 lines)

@@ -0,0 +1,42 @@
package mobyexporter

import (
	"testing"

	"gotest.tools/v3/assert"
)

func TestPatchImageConfig(t *testing.T) {
	for _, tc := range []struct {
		name    string
		cfgJSON string
		err     string
	}{
		{
			name:    "empty",
			cfgJSON: "{}",
		},
		{
			name:    "history only",
			cfgJSON: `{"history": []}`,
		},
		{
			name:    "rootfs only",
			cfgJSON: `{"rootfs": {}}`,
		},
		{
			name:    "null",
			cfgJSON: "null",
			err:     "null image config",
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			_, err := patchImageConfig([]byte(tc.cfgJSON), nil, nil, nil)
			if tc.err == "" {
				assert.NilError(t, err)
			} else {
				assert.ErrorContains(t, err, tc.err)
			}
		})
	}
}

@@ -159,7 +159,7 @@ func newBuilder(ctx context.Context, options builderOptions) (*Builder, error) {
 	if config.Platform != "" {
 		sp, err := platforms.Parse(config.Platform)
 		if err != nil {
-			return nil, err
+			return nil, errdefs.InvalidParameter(err)
 		}
 		b.platform = &sp
 	}

@@ -166,17 +166,17 @@ func initializeStage(ctx context.Context, d dispatchRequest, cmd *instructions.S
 		p, err := platforms.Parse(v)
 		if err != nil {
-			return errors.Wrapf(err, "failed to parse platform %s", v)
+			return errors.Wrapf(errdefs.InvalidParameter(err), "failed to parse platform %s", v)
 		}
 		platform = &p
 	}

-	image, err := d.getFromImage(ctx, d.shlex, cmd.BaseName, platform)
+	img, err := d.getFromImage(ctx, d.shlex, cmd.BaseName, platform)
 	if err != nil {
 		return err
 	}
 	state := d.state
-	if err := state.beginStage(cmd.Name, image); err != nil {
+	if err := state.beginStage(cmd.Name, img); err != nil {
 		return err
 	}
 	if len(state.runConfig.OnBuild) > 0 {

@@ -27,25 +27,25 @@ func parseChownFlag(ctx context.Context, builder *Builder, state *dispatchState,
 	passwdPath, err := symlink.FollowSymlinkInScope(filepath.Join(ctrRootPath, "etc", "passwd"), ctrRootPath)
 	if err != nil {
-		return idtools.Identity{}, errors.Wrapf(err, "can't resolve /etc/passwd path in container rootfs")
+		return idtools.Identity{}, errors.Wrap(err, "can't resolve /etc/passwd path in container rootfs")
 	}
 	groupPath, err := symlink.FollowSymlinkInScope(filepath.Join(ctrRootPath, "etc", "group"), ctrRootPath)
 	if err != nil {
-		return idtools.Identity{}, errors.Wrapf(err, "can't resolve /etc/group path in container rootfs")
+		return idtools.Identity{}, errors.Wrap(err, "can't resolve /etc/group path in container rootfs")
 	}
 	uid, err := lookupUser(userStr, passwdPath)
 	if err != nil {
-		return idtools.Identity{}, errors.Wrapf(err, "can't find uid for user "+userStr)
+		return idtools.Identity{}, errors.Wrap(err, "can't find uid for user "+userStr)
 	}
 	gid, err := lookupGroup(grpStr, groupPath)
 	if err != nil {
-		return idtools.Identity{}, errors.Wrapf(err, "can't find gid for group "+grpStr)
+		return idtools.Identity{}, errors.Wrap(err, "can't find gid for group "+grpStr)
 	}

 	// convert as necessary because of user namespaces
 	chownPair, err := identityMapping.ToHost(idtools.Identity{UID: uid, GID: gid})
 	if err != nil {
-		return idtools.Identity{}, errors.Wrapf(err, "unable to convert uid/gid to host mapping")
+		return idtools.Identity{}, errors.Wrap(err, "unable to convert uid/gid to host mapping")
 	}
 	return chownPair, nil
 }

@@ -10,6 +10,7 @@ import (
 	"github.com/containerd/containerd/platforms"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/mount"
+	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/jsonmessage"
 	"golang.org/x/sys/windows"

@@ -62,7 +63,7 @@ func lookupNTAccount(ctx context.Context, builder *Builder, accountName string,
 	optionsPlatform, err := platforms.Parse(builder.options.Platform)
 	if err != nil {
-		return idtools.Identity{}, err
+		return idtools.Identity{}, errdefs.InvalidParameter(err)
 	}

 	runConfig := copyRunConfig(state.runConfig,

@@ -44,8 +44,8 @@ func downloadRemote(remoteURL string) (string, io.ReadCloser, error) {
 // GetWithStatusError does an http.Get() and returns an error if the
 // status code is 4xx or 5xx.
 func GetWithStatusError(address string) (resp *http.Response, err error) {
-	// #nosec G107
-	if resp, err = http.Get(address); err != nil {
+	resp, err = http.Get(address) // #nosec G107 -- ignore G107: Potential HTTP request made with variable url
+	if err != nil {
 		if uerr, ok := err.(*url.Error); ok {
 			if derr, ok := uerr.Err.(*net.DNSError); ok && !derr.IsTimeout {
 				return nil, errdefs.NotFound(err)

@@ -1,12 +1,14 @@
 package main

 import (
+	"runtime"
 	"testing"

 	"github.com/containerd/log"
 	"github.com/docker/docker/daemon/config"
 	"github.com/google/go-cmp/cmp/cmpopts"
 	"github.com/spf13/pflag"
+	"go.opentelemetry.io/otel"
 	"gotest.tools/v3/assert"
 	is "gotest.tools/v3/assert/cmp"
 	"gotest.tools/v3/fs"

@@ -284,3 +286,29 @@ func TestCDISpecDirs(t *testing.T) {
 		})
 	}
 }
+
+// TestOtelMeterLeak tests for a memory leak in the OTEL meter implementation.
+// Once the fixed OTEL is vendored, this test will fail - the workaround
+// and this test should be removed then.
+func TestOtelMeterLeak(t *testing.T) {
+	meter := otel.Meter("foo")
+
+	var before runtime.MemStats
+	runtime.ReadMemStats(&before)
+
+	const counters = 10 * 1000 * 1000
+	for i := 0; i < counters; i++ {
+		_, _ = meter.Int64Counter("bar")
+	}
+
+	var after runtime.MemStats
+	runtime.ReadMemStats(&after)
+
+	allocs := after.Mallocs - before.Mallocs
+	t.Log("Allocations:", allocs)
+
+	if allocs < 10 {
+		// TODO: Remove Workaround OTEL memory leak in cmd/dockerd/daemon.go
+		t.Fatal("Allocations count decreased. OTEL leak workaround is no longer needed!")
+	}
+}

@@ -14,6 +14,9 @@ import (
 	"github.com/moby/buildkit/util/apicaps"
 	"github.com/moby/term"
 	"github.com/spf13/cobra"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/metric/noop"
 )

 var honorXDG bool

@@ -82,6 +85,12 @@ func main() {
 	// Fixes https://github.com/docker/docker/issues/19728
 	signal.Ignore(syscall.SIGPIPE)

+	// Workaround OTEL memory leak
+	// See: https://github.com/open-telemetry/opentelemetry-go-contrib/issues/5190
+	// The need for this workaround is checked by the TestOtelMeterLeak test
+	// TODO: Remove this workaround after upgrading to v1.30.0
+	otel.SetMeterProvider(noop.MeterProvider{})
+
 	// Set terminal emulation based on platform as required.
 	_, stdout, stderr := term.StdStreams()
 	onError := func(err error) {

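A self-contained sketch of the workaround above: installing a no-op meter provider makes every otel.Meter call hand back inert instruments, so the leaky code path described in the linked issue is never exercised. The instrument names here are arbitrary examples, and the claim about the leak is taken from the diff's comments rather than verified independently:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric/noop"
)

func main() {
	// Install a provider whose meters and instruments are all no-ops.
	otel.SetMeterProvider(noop.MeterProvider{})

	// Library code that creates meters/counters now gets inert instruments.
	meter := otel.Meter("example")
	counter, err := meter.Int64Counter("requests")
	if err != nil {
		panic(err)
	}
	counter.Add(context.Background(), 1) // recorded nowhere; cheap to call

	fmt.Println("metrics disabled via noop.MeterProvider{}")
}
```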
@@ -14,7 +14,7 @@ func NoArgs(cmd *cobra.Command, args []string) error {
 	}

 	if cmd.HasSubCommands() {
-		return errors.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n"))
+		return errors.New("\n" + strings.TrimRight(cmd.UsageString(), "\n"))
 	}

 	return errors.Errorf(

@@ -2,6 +2,7 @@ package stream // import "github.com/docker/docker/container/stream"

 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"strings"

@@ -91,24 +92,24 @@ func (c *Config) NewNopInputPipe() {

 // CloseStreams ensures that the configured streams are properly closed.
 func (c *Config) CloseStreams() error {
-	var errors []string
+	var errs []string

 	if c.stdin != nil {
 		if err := c.stdin.Close(); err != nil {
-			errors = append(errors, fmt.Sprintf("error close stdin: %s", err))
+			errs = append(errs, fmt.Sprintf("error close stdin: %s", err))
 		}
 	}

 	if err := c.stdout.Clean(); err != nil {
-		errors = append(errors, fmt.Sprintf("error close stdout: %s", err))
+		errs = append(errs, fmt.Sprintf("error close stdout: %s", err))
 	}

 	if err := c.stderr.Clean(); err != nil {
-		errors = append(errors, fmt.Sprintf("error close stderr: %s", err))
+		errs = append(errs, fmt.Sprintf("error close stderr: %s", err))
 	}

-	if len(errors) > 0 {
-		return fmt.Errorf(strings.Join(errors, "\n"))
+	if len(errs) > 0 {
+		return errors.New(strings.Join(errs, "\n"))
 	}

 	return nil

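The CloseStreams change above keeps the existing join-the-strings approach, only renaming the slice (so it no longer shadows the errors package) and switching to errors.New. As a point of comparison, and not what the code above does, here is a hedged sketch of aggregating close errors with the standard library's errors.Join (Go 1.20+):

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// closeAll closes every closer and returns a single error wrapping all
// individual failures, using errors.Join instead of string concatenation.
func closeAll(closers ...io.Closer) error {
	var errs []error
	for _, c := range closers {
		if err := c.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	return errors.Join(errs...) // nil when errs is empty
}

type failingCloser struct{ name string }

func (f failingCloser) Close() error { return fmt.Errorf("close %s: boom", f.name) }

func main() {
	err := closeAll(failingCloser{"stdin"}, io.NopCloser(strings.NewReader("")), failingCloser{"stderr"})
	fmt.Println(err) // lists both failures, one per line
}
```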
@@ -279,6 +279,19 @@ func (n *nodeRunner) handleNodeExit(node *swarmnode.Node) {
 	close(n.done)
 	select {
 	case <-n.ready:
+		// there is a case where a node can be promoted to manager while
+		// another node is leaving the cluster. the node being promoted, by
+		// random chance, picks the IP of the node being demoted as the one it
+		// tries to connect to. in this case, the promotion will fail, and the
+		// whole swarm Node object packs it in.
+		//
+		// when the Node object is relaunched by this code, because it has
+		// joinAddr in the config, it attempts again to connect to the same
+		// no-longer-manager node, and crashes again. this continues forever.
+		//
+		// to avoid this case, in this block, we remove JoinAddr from the
+		// config.
+		n.config.joinAddr = ""
 		n.enableReconnectWatcher()
 	default:
 		if n.repeatedRun {

@@ -902,7 +902,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 	// ensureDefaultAppArmorProfile does nothing if apparmor is disabled
 	if err := ensureDefaultAppArmorProfile(); err != nil {
-		log.G(ctx).Errorf(err.Error())
+		log.G(ctx).WithError(err).Error("Failed to ensure default apparmor profile is loaded")
 	}

 	daemonRepo := filepath.Join(cfgStore.Root, "containers")

@@ -962,8 +962,8 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 		// TODO(stevvooe): We may need to allow configuration of this on the client.
 		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
 		grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
-		grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()),
-		grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()),
+		grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()), //nolint:staticcheck // TODO(thaJeztah): ignore SA1019 for deprecated options: see https://github.com/moby/moby/issues/47437
+		grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()), //nolint:staticcheck // TODO(thaJeztah): ignore SA1019 for deprecated options: see https://github.com/moby/moby/issues/47437
 	}

 	if cfgStore.ContainerdAddr != "" {

@@ -852,6 +852,10 @@ func (daemon *Daemon) initNetworkController(cfg *config.Config, activeSandboxes
 		return err
 	}

+	if err := daemon.netController.SetupUserChains(); err != nil {
+		log.G(context.TODO()).WithError(err).Warnf("initNetworkController")
+	}
+
 	// Set HostGatewayIP to the default bridge's IP if it is empty
 	setHostGatewayIP(daemon.netController, cfg)
 	return nil

@@ -42,9 +42,14 @@ func (i *ImageService) ImageHistory(ctx context.Context, name string) ([]*image.
 			layerCounter++
 		}

+		var created int64
+		if h.Created != nil {
+			created = h.Created.Unix()
+		}
+
 		history = append([]*image.HistoryResponseItem{{
 			ID:        "<missing>",
-			Created:   h.Created.Unix(),
+			Created:   created,
 			CreatedBy: h.CreatedBy,
 			Comment:   h.Comment,
 			Size:      layerSize,

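The history fix above guards a possibly-nil Created timestamp before calling Unix(). A tiny illustration of the failure mode with simplified types; historyEntry below is a stand-in written for this example, not the daemon's struct:

```go
package main

import (
	"fmt"
	"time"
)

// historyEntry stands in for an OCI image history entry, where Created
// is optional and therefore a pointer that may be nil.
type historyEntry struct {
	Created   *time.Time
	CreatedBy string
}

// createdUnix returns the Unix timestamp, or 0 when Created is absent,
// instead of dereferencing a nil pointer.
func createdUnix(h historyEntry) int64 {
	if h.Created == nil {
		return 0
	}
	return h.Created.Unix()
}

func main() {
	now := time.Now()
	fmt.Println(createdUnix(historyEntry{Created: &now})) // real timestamp
	fmt.Println(createdUnix(historyEntry{}))              // 0, no panic
}
```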
@@ -406,12 +406,11 @@ func newAWSLogsClient(info logger.Info, configOpts ...func(*config.LoadOptions)
 	clientOpts = append(
 		clientOpts,
 		cloudwatchlogs.WithAPIOptions(middleware.AddUserAgentKeyValue("Docker", dockerversion.Version)),
+		func(o *cloudwatchlogs.Options) {
+			o.BaseEndpoint = endpoint
+		},
 	)

-	if endpoint != nil {
-		clientOpts = append(clientOpts, cloudwatchlogs.WithEndpointResolver(cloudwatchlogs.EndpointResolverFromURL(*endpoint)))
-	}
-
 	client := cloudwatchlogs.NewFromConfig(cfg, clientOpts...)

 	return client, nil

@@ -66,7 +66,7 @@ func getTailReader(ctx context.Context, r loggerutils.SizeReaderAt, req int) (io
 	}

 	if msgLen != binary.BigEndian.Uint32(buf) {
-		return nil, 0, errdefs.DataLoss(errors.Wrap(err, "log message header and footer indicate different message sizes"))
+		return nil, 0, errdefs.DataLoss(errors.New("log message header and footer indicate different message sizes"))
 	}

 	found++

@@ -51,12 +51,15 @@ func setNvidiaGPUs(s *specs.Spec, dev *deviceInstance) error {
 		return errConflictCountDeviceIDs
 	}

-	if len(req.DeviceIDs) > 0 {
+	switch {
+	case len(req.DeviceIDs) > 0:
 		s.Process.Env = append(s.Process.Env, "NVIDIA_VISIBLE_DEVICES="+strings.Join(req.DeviceIDs, ","))
-	} else if req.Count > 0 {
+	case req.Count > 0:
 		s.Process.Env = append(s.Process.Env, "NVIDIA_VISIBLE_DEVICES="+countToDevices(req.Count))
-	} else if req.Count < 0 {
+	case req.Count < 0:
 		s.Process.Env = append(s.Process.Env, "NVIDIA_VISIBLE_DEVICES=all")
+	case req.Count == 0:
+		s.Process.Env = append(s.Process.Env, "NVIDIA_VISIBLE_DEVICES=void")
 	}

 	var nvidiaCaps []string

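The new switch above maps a GPU device request onto the NVIDIA_VISIBLE_DEVICES value, with the added Count == 0 arm producing "void" so that requesting zero GPUs exposes none. A standalone sketch of the same mapping; gpuRequest and the countToDevices helper below are simplified stand-ins, not the daemon's actual types:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// gpuRequest is a simplified stand-in for a device request.
type gpuRequest struct {
	DeviceIDs []string
	Count     int
}

// countToDevices renders "0,1,...,n-1", which is what a helper with this
// name would plausibly do for a numeric GPU count.
func countToDevices(count int) string {
	ids := make([]string, count)
	for i := range ids {
		ids[i] = strconv.Itoa(i)
	}
	return strings.Join(ids, ",")
}

// visibleDevices maps a request to the NVIDIA_VISIBLE_DEVICES value.
func visibleDevices(req gpuRequest) string {
	switch {
	case len(req.DeviceIDs) > 0:
		return strings.Join(req.DeviceIDs, ",")
	case req.Count > 0:
		return countToDevices(req.Count)
	case req.Count < 0:
		return "all" // negative count means "all GPUs"
	default: // req.Count == 0
		return "void" // expose no GPUs
	}
}

func main() {
	fmt.Println(visibleDevices(gpuRequest{DeviceIDs: []string{"GPU-aaaa", "GPU-bbbb"}}))
	fmt.Println(visibleDevices(gpuRequest{Count: 2}))  // "0,1"
	fmt.Println(visibleDevices(gpuRequest{Count: -1})) // "all"
	fmt.Println(visibleDevices(gpuRequest{}))          // "void"
}
```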
@@ -15,7 +15,7 @@ set -e
 # the binary version you may also need to update the vendor version to pick up
 # bug fixes or new APIs, however, usually the Go packages are built from a
 # commit from the master branch.
-: "${CONTAINERD_VERSION:=v1.7.13}"
+: "${CONTAINERD_VERSION:=v1.7.22}"

 install_containerd() (
 	echo "Install containerd version $CONTAINERD_VERSION"

@@ -1,6 +1,6 @@
 # syntax=docker/dockerfile:1

-ARG GO_VERSION=1.21.8
+ARG GO_VERSION=1.22.8
 ARG BASE_DEBIAN_DISTRO="bookworm"
 ARG PROTOC_VERSION=3.11.4

@@ -78,6 +78,18 @@ source "${MAKEDIR}/.go-autogen"
 	GCFLAGS="all=-N -l"
 fi

+if [ "$(go env GOARCH)" = "arm" ] && [ "$(go env GOARM)" = "5" ]; then
+	# cross-compiling for arm/v5 fails on go1.22; a fix is included for this
+	# in go1.23 (https://github.com/golang/go/issues/65290), but for go1.22
+	# we can set the correct option manually.
+	CGO_CFLAGS+=" -Wno-atomic-alignment"
+	export CGO_CFLAGS
+
+	# Make sure libatomic is included on arm/v5, because clang does not auto-link it.
+	# see https://github.com/moby/moby/pull/46982#issuecomment-2206992611
+	export CGO_LDFLAGS="-latomic"
+fi
+
 echo "Building $([ "$DOCKER_STATIC" = "1" ] && echo "static" || echo "dynamic") $DEST/$BINARY_FULLNAME ($PLATFORM_NAME)..."
 if [ -n "$DOCKER_DEBUG" ]; then
 	set -x

@@ -223,8 +223,6 @@ func (s *saveSession) save(outStream io.Writer) error {
 		})
 	}

-	imgPlat := imageDescr.image.Platform()
-
 	m := ocispec.Manifest{
 		Versioned: specs.Versioned{
 			SchemaVersion: 2,

@@ -234,7 +232,6 @@ func (s *saveSession) save(outStream io.Writer) error {
 			MediaType: ocispec.MediaTypeImageConfig,
 			Digest:    digest.Digest(imageDescr.image.ID()),
 			Size:      int64(len(imageDescr.image.RawJSON())),
-			Platform:  &imgPlat,
 		},
 		Layers: foreign,
 	}

@@ -2,7 +2,7 @@ package main

 import (
 	"context"
-	"fmt"
+	"errors"
 	"os"
 	"runtime"
 	"strings"

@@ -46,7 +46,7 @@ func (s *DockerBenchmarkSuite) BenchmarkConcurrentContainerActions(c *testing.B)
 			args = append(args, sleepCommandForDaemonPlatform()...)
 			out, _, err := dockerCmdWithError(args...)
 			if err != nil {
-				chErr <- fmt.Errorf(out)
+				chErr <- errors.New(out)
 				return
 			}

@@ -59,29 +59,29 @@ func (s *DockerBenchmarkSuite) BenchmarkConcurrentContainerActions(c *testing.B)
 			defer os.RemoveAll(tmpDir)
 			out, _, err = dockerCmdWithError("cp", id+":/tmp", tmpDir)
 			if err != nil {
-				chErr <- fmt.Errorf(out)
+				chErr <- errors.New(out)
 				return
 			}

 			out, _, err = dockerCmdWithError("kill", id)
 			if err != nil {
-				chErr <- fmt.Errorf(out)
+				chErr <- errors.New(out)
 			}

 			out, _, err = dockerCmdWithError("start", id)
 			if err != nil {
-				chErr <- fmt.Errorf(out)
+				chErr <- errors.New(out)
 			}

 			out, _, err = dockerCmdWithError("kill", id)
 			if err != nil {
-				chErr <- fmt.Errorf(out)
+				chErr <- errors.New(out)
 			}

 			// don't do an rm -f here since it can potentially ignore errors from the graphdriver
 			out, _, err = dockerCmdWithError("rm", id)
 			if err != nil {
-				chErr <- fmt.Errorf(out)
+				chErr <- errors.New(out)
 			}
 		}
 	}()

@@ -91,7 +91,7 @@ func (s *DockerBenchmarkSuite) BenchmarkConcurrentContainerActions(c *testing.B)
 		for i := 0; i < numIterations; i++ {
 			out, _, err := dockerCmdWithError("ps")
 			if err != nil {
-				chErr <- fmt.Errorf(out)
+				chErr <- errors.New(out)
 			}
 		}
 	}()

@@ -116,7 +116,7 @@ func (s *DockerBenchmarkSuite) BenchmarkLogsCLIRotateFollow(c *testing.B) {
 		ch <- nil
 		out, _, _ := dockerCmdWithError("logs", "-f", id)
 		// if this returns at all, it's an error
-		ch <- fmt.Errorf(out)
+		ch <- errors.New(out)
 	}()

 	<-ch

@@ -60,16 +60,18 @@ func (s *DockerNetworkSuite) SetUpSuite(ctx context.Context, c *testing.T) {
 	setupRemoteNetworkDrivers(c, mux, s.server.URL, dummyNetworkDriver, dummyIPAMDriver)
 }

-func setupRemoteNetworkDrivers(c *testing.T, mux *http.ServeMux, url, netDrv, ipamDrv string) {
+func setupRemoteNetworkDrivers(t *testing.T, mux *http.ServeMux, url, netDrv, ipamDrv string) {
 	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
 		w.Header().Set("Content-Type", plugins.VersionMimetype)
-		fmt.Fprintf(w, `{"Implements": ["%s", "%s"]}`, driverapi.NetworkPluginEndpointType, ipamapi.PluginEndpointType)
+		_, err := fmt.Fprintf(w, `{"Implements": ["%s", "%s"]}`, driverapi.NetworkPluginEndpointType, ipamapi.PluginEndpointType)
+		assert.NilError(t, err)
 	})

 	// Network driver implementation
 	mux.HandleFunc(fmt.Sprintf("/%s.GetCapabilities", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) {
 		w.Header().Set("Content-Type", plugins.VersionMimetype)
-		fmt.Fprintf(w, `{"Scope":"local"}`)
+		_, err := fmt.Fprint(w, `{"Scope":"local"}`)
+		assert.NilError(t, err)
 	})

 	mux.HandleFunc(fmt.Sprintf("/%s.CreateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) {

@@ -79,17 +81,20 @@ func setupRemoteNetworkDrivers(c *testing.T, mux *http.ServeMux, url, netDrv, ip
 			return
 		}
 		w.Header().Set("Content-Type", plugins.VersionMimetype)
-		fmt.Fprintf(w, "null")
+		_, err = fmt.Fprint(w, "null")
+		assert.NilError(t, err)
 	})

 	mux.HandleFunc(fmt.Sprintf("/%s.DeleteNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) {
 		w.Header().Set("Content-Type", plugins.VersionMimetype)
-		fmt.Fprintf(w, "null")
+		_, err := fmt.Fprint(w, "null")
+		assert.NilError(t, err)
 	})

 	mux.HandleFunc(fmt.Sprintf("/%s.CreateEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) {
 		w.Header().Set("Content-Type", plugins.VersionMimetype)
-		fmt.Fprintf(w, `{"Interface":{"MacAddress":"a0:b1:c2:d3:e4:f5"}}`)
+		_, err := fmt.Fprint(w, `{"Interface":{"MacAddress":"a0:b1:c2:d3:e4:f5"}}`)
+		assert.NilError(t, err)
 	})

 	mux.HandleFunc(fmt.Sprintf("/%s.Join", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) {

@@ -99,23 +104,28 @@ func setupRemoteNetworkDrivers(c *testing.T, mux *http.ServeMux, url, netDrv, ip
 			LinkAttrs: netlink.LinkAttrs{Name: "randomIfName", TxQLen: 0}, PeerName: "cnt0",
 		}
 		if err := netlink.LinkAdd(veth); err != nil {
-			fmt.Fprintf(w, `{"Error":"failed to add veth pair: `+err.Error()+`"}`)
+			_, err = fmt.Fprintf(w, `{"Error":"failed to add veth pair: %v"}`, err)
+			assert.NilError(t, err)
 		} else {
-			fmt.Fprintf(w, `{"InterfaceName":{ "SrcName":"cnt0", "DstPrefix":"veth"}}`)
+			_, err = fmt.Fprint(w, `{"InterfaceName":{ "SrcName":"cnt0", "DstPrefix":"veth"}}`)
+			assert.NilError(t, err)
 		}
 	})

 	mux.HandleFunc(fmt.Sprintf("/%s.Leave", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) {
 		w.Header().Set("Content-Type", plugins.VersionMimetype)
-		fmt.Fprintf(w, "null")
+		_, err := fmt.Fprint(w, "null")
+		assert.NilError(t, err)
 	})

 	mux.HandleFunc(fmt.Sprintf("/%s.DeleteEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) {
 		w.Header().Set("Content-Type", plugins.VersionMimetype)
 		if link, err := netlink.LinkByName("cnt0"); err == nil {
-			netlink.LinkDel(link)
+			err = netlink.LinkDel(link)
+			assert.NilError(t, err)
 		}
-		fmt.Fprintf(w, "null")
+		_, err := fmt.Fprint(w, "null")
+		assert.NilError(t, err)
 	})

 	// IPAM Driver implementation

@@ -124,16 +134,19 @@ func setupRemoteNetworkDrivers(c *testing.T, mux *http.ServeMux, url, netDrv, ip
 		poolReleaseReq    remoteipam.ReleasePoolRequest
 		addressRequest    remoteipam.RequestAddressRequest
 		addressReleaseReq remoteipam.ReleaseAddressRequest
-		lAS               = "localAS"
-		gAS               = "globalAS"
-		pool              = "172.28.0.0/16"
-		poolID            = lAS + "/" + pool
-		gw                = "172.28.255.254/16"
 	)
+	const (
+		lAS    = "localAS"
+		gAS    = "globalAS"
+		pool   = "172.28.0.0/16"
+		poolID = lAS + "/" + pool
+		gw     = "172.28.255.254/16"
+	)

 	mux.HandleFunc(fmt.Sprintf("/%s.GetDefaultAddressSpaces", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) {
 		w.Header().Set("Content-Type", plugins.VersionMimetype)
-		fmt.Fprintf(w, `{"LocalDefaultAddressSpace":"`+lAS+`", "GlobalDefaultAddressSpace": "`+gAS+`"}`)
+		_, err := fmt.Fprint(w, `{"LocalDefaultAddressSpace":"`+lAS+`", "GlobalDefaultAddressSpace": "`+gAS+`"}`)
+		assert.NilError(t, err)
 	})

 	mux.HandleFunc(fmt.Sprintf("/%s.RequestPool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) {

@@ -144,11 +157,14 @@ func setupRemoteNetworkDrivers(c *testing.T, mux *http.ServeMux, url, netDrv, ip
 		}
 		w.Header().Set("Content-Type", plugins.VersionMimetype)
 		if poolRequest.AddressSpace != lAS && poolRequest.AddressSpace != gAS {
-			fmt.Fprintf(w, `{"Error":"Unknown address space in pool request: `+poolRequest.AddressSpace+`"}`)
+			_, err := fmt.Fprint(w, `{"Error":"Unknown address space in pool request: `+poolRequest.AddressSpace+`"}`)
+			assert.NilError(t, err)
 		} else if poolRequest.Pool != "" && poolRequest.Pool != pool {
-			fmt.Fprintf(w, `{"Error":"Cannot handle explicit pool requests yet"}`)
+			_, err := fmt.Fprint(w, `{"Error":"Cannot handle explicit pool requests yet"}`)
+			assert.NilError(t, err)
 		} else {
-			fmt.Fprintf(w, `{"PoolID":"`+poolID+`", "Pool":"`+pool+`"}`)
+			_, err := fmt.Fprint(w, `{"PoolID":"`+poolID+`", "Pool":"`+pool+`"}`)
+			assert.NilError(t, err)
 		}
 	})

@@ -161,11 +177,14 @@ func setupRemoteNetworkDrivers(c *testing.T, mux *http.ServeMux, url, netDrv, ip
 		w.Header().Set("Content-Type", plugins.VersionMimetype)
 		// make sure libnetwork is now querying on the expected pool id
 		if addressRequest.PoolID != poolID {
-			fmt.Fprintf(w, `{"Error":"unknown pool id"}`)
+			_, err := fmt.Fprint(w, `{"Error":"unknown pool id"}`)
+			assert.NilError(t, err)
 		} else if addressRequest.Address != "" {
|
||||
fmt.Fprintf(w, `{"Error":"Cannot handle explicit address requests yet"}`)
|
||||
_, err := fmt.Fprint(w, `{"Error":"Cannot handle explicit address requests yet"}`)
|
||||
assert.NilError(t, err)
|
||||
} else {
|
||||
fmt.Fprintf(w, `{"Address":"`+gw+`"}`)
|
||||
_, err := fmt.Fprint(w, `{"Address":"`+gw+`"}`)
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
})
|
||||
|
||||
@@ -178,11 +197,14 @@ func setupRemoteNetworkDrivers(c *testing.T, mux *http.ServeMux, url, netDrv, ip
|
||||
w.Header().Set("Content-Type", plugins.VersionMimetype)
|
||||
// make sure libnetwork is now asking to release the expected address from the expected poolid
|
||||
if addressRequest.PoolID != poolID {
|
||||
fmt.Fprintf(w, `{"Error":"unknown pool id"}`)
|
||||
_, err := fmt.Fprint(w, `{"Error":"unknown pool id"}`)
|
||||
assert.NilError(t, err)
|
||||
} else if addressReleaseReq.Address != gw {
|
||||
fmt.Fprintf(w, `{"Error":"unknown address"}`)
|
||||
_, err := fmt.Fprint(w, `{"Error":"unknown address"}`)
|
||||
assert.NilError(t, err)
|
||||
} else {
|
||||
fmt.Fprintf(w, "null")
|
||||
_, err := fmt.Fprint(w, "null")
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
})
|
||||
|
||||
@@ -195,22 +217,24 @@ func setupRemoteNetworkDrivers(c *testing.T, mux *http.ServeMux, url, netDrv, ip
|
||||
w.Header().Set("Content-Type", plugins.VersionMimetype)
|
||||
// make sure libnetwork is now asking to release the expected poolid
|
||||
if addressRequest.PoolID != poolID {
|
||||
fmt.Fprintf(w, `{"Error":"unknown pool id"}`)
|
||||
_, err := fmt.Fprint(w, `{"Error":"unknown pool id"}`)
|
||||
assert.NilError(t, err)
|
||||
} else {
|
||||
fmt.Fprintf(w, "null")
|
||||
_, err := fmt.Fprint(w, "null")
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
})
|
||||
|
||||
err := os.MkdirAll("/etc/docker/plugins", 0o755)
|
||||
assert.NilError(c, err)
|
||||
assert.NilError(t, err)
|
||||
|
||||
fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", netDrv)
|
||||
err = os.WriteFile(fileName, []byte(url), 0o644)
|
||||
assert.NilError(c, err)
|
||||
assert.NilError(t, err)
|
||||
|
||||
ipamFileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", ipamDrv)
|
||||
err = os.WriteFile(ipamFileName, []byte(url), 0o644)
|
||||
assert.NilError(c, err)
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
|
||||
func (s *DockerNetworkSuite) TearDownSuite(ctx context.Context, c *testing.T) {
|
||||
@@ -509,9 +533,9 @@ func (s *DockerCLINetworkSuite) TestDockerInspectNetworkWithContainerName(c *tes
|
||||
err := json.Unmarshal([]byte(out), &networkResources)
|
||||
assert.NilError(c, err)
|
||||
assert.Equal(c, len(networkResources), 1)
|
||||
container, ok := networkResources[0].Containers[containerID]
|
||||
ctr, ok := networkResources[0].Containers[containerID]
|
||||
assert.Assert(c, ok)
|
||||
assert.Equal(c, container.Name, "testNetInspect1")
|
||||
assert.Equal(c, ctr.Name, "testNetInspect1")
|
||||
|
||||
// rename container and check docker inspect output update
|
||||
newName := "HappyNewName"
|
||||
@@ -826,12 +850,12 @@ func (s *DockerDaemonSuite) TestDockerNetworkNoDiscoveryDefaultBridgeNetwork(c *
|
||||
assert.NilError(c, err)
|
||||
assert.Equal(c, hosts, hostsPost, fmt.Sprintf("Unexpected %s change on second container creation", hostsFile))
|
||||
// but discovery is on when connecting to non default bridge network
|
||||
network := "anotherbridge"
|
||||
out, err = s.d.Cmd("network", "create", network)
|
||||
nw := "anotherbridge"
|
||||
out, err = s.d.Cmd("network", "create", nw)
|
||||
assert.NilError(c, err, out)
|
||||
defer s.d.Cmd("network", "rm", network)
|
||||
defer s.d.Cmd("network", "rm", nw)
|
||||
|
||||
out, err = s.d.Cmd("network", "connect", network, cid1)
|
||||
out, err = s.d.Cmd("network", "connect", nw, cid1)
|
||||
assert.NilError(c, err, out)
|
||||
|
||||
hosts, err = s.d.Cmd("exec", cid1, "cat", hostsFile)
|
||||
@@ -898,15 +922,15 @@ func (s *DockerNetworkSuite) TestDockerNetworkLinkOnDefaultNetworkOnly(c *testin
|
||||
// Legacy Link feature must work only on default network, and not across networks
|
||||
cnt1 := "container1"
|
||||
cnt2 := "container2"
|
||||
network := "anotherbridge"
|
||||
nw := "anotherbridge"
|
||||
|
||||
// Run first container on default network
|
||||
cli.DockerCmd(c, "run", "-d", "--name", cnt1, "busybox", "top")
|
||||
|
||||
// Create another network and run the second container on it
|
||||
cli.DockerCmd(c, "network", "create", network)
|
||||
assertNwIsAvailable(c, network)
|
||||
cli.DockerCmd(c, "run", "-d", "--net", network, "--name", cnt2, "busybox", "top")
|
||||
cli.DockerCmd(c, "network", "create", nw)
|
||||
assertNwIsAvailable(c, nw)
|
||||
cli.DockerCmd(c, "run", "-d", "--net", nw, "--name", cnt2, "busybox", "top")
|
||||
|
||||
// Try launching a container on default network, linking to the first container. Must succeed
|
||||
cli.DockerCmd(c, "run", "-d", "--link", fmt.Sprintf("%s:%s", cnt1, cnt1), "busybox", "top")
|
||||
@@ -1735,12 +1759,12 @@ func (s *DockerNetworkSuite) TestDockerNetworkValidateIP(c *testing.T) {
|
||||
// Test case for 26220
|
||||
func (s *DockerNetworkSuite) TestDockerNetworkDisconnectFromBridge(c *testing.T) {
|
||||
out := cli.DockerCmd(c, "network", "inspect", "--format", "{{.Id}}", "bridge").Stdout()
|
||||
network := strings.TrimSpace(out)
|
||||
nw := strings.TrimSpace(out)
|
||||
|
||||
name := "test"
|
||||
cli.DockerCmd(c, "create", "--name", name, "busybox", "top")
|
||||
|
||||
_, _, err := dockerCmdWithError("network", "disconnect", network, name)
|
||||
_, _, err := dockerCmdWithError("network", "disconnect", nw, name)
|
||||
assert.NilError(c, err)
|
||||
}
|
||||
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
@@ -250,11 +249,8 @@ func (s *DockerCLIRunSuite) TestRunAttachDetachFromConfig(c *testing.T) {
|
||||
os.Mkdir(dotDocker, 0o600)
|
||||
tmpCfg := filepath.Join(dotDocker, "config.json")
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
c.Setenv("USERPROFILE", tmpDir)
|
||||
} else {
|
||||
c.Setenv("HOME", tmpDir)
|
||||
}
|
||||
// TODO(thaJeztah): migrate this test to docker/cli, and run on Windows as well (using USERPROFILE for home-dir)
|
||||
c.Setenv("HOME", tmpDir)
|
||||
|
||||
data := `{
|
||||
"detachKeys": "ctrl-a,a"
|
||||
@@ -334,11 +330,8 @@ func (s *DockerCLIRunSuite) TestRunAttachDetachKeysOverrideConfig(c *testing.T)
|
||||
os.Mkdir(dotDocker, 0o600)
|
||||
tmpCfg := filepath.Join(dotDocker, "config.json")
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
c.Setenv("USERPROFILE", tmpDir)
|
||||
} else {
|
||||
c.Setenv("HOME", tmpDir)
|
||||
}
|
||||
// TODO(thaJeztah): migrate this test to docker/cli, and run on Windows as well (using USERPROFILE for home-dir)
|
||||
c.Setenv("HOME", tmpDir)
|
||||
|
||||
data := `{
|
||||
"detachKeys": "ctrl-e,e"
|
||||
|
||||
@@ -384,7 +384,8 @@ func (s *DockerSwarmSuite) TestSwarmContainerAttachByNetworkId(c *testing.T) {
|
||||
out, err = d.Cmd("run", "-d", "--net", networkID, "busybox", "top")
|
||||
assert.NilError(c, err, out)
|
||||
cID := strings.TrimSpace(out)
|
||||
d.WaitRun(cID)
|
||||
err = d.WaitRun(cID)
|
||||
assert.NilError(c, err)
|
||||
|
||||
out, err = d.Cmd("rm", "-f", cID)
|
||||
assert.NilError(c, err, out)
|
||||
@@ -638,16 +639,18 @@ const (
|
||||
globalIPAMPlugin = "global-ipam-plugin"
|
||||
)
|
||||
|
||||
func setupRemoteGlobalNetworkPlugin(c *testing.T, mux *http.ServeMux, url, netDrv, ipamDrv string) {
|
||||
func setupRemoteGlobalNetworkPlugin(t *testing.T, mux *http.ServeMux, url, netDrv, ipamDrv string) {
|
||||
mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", plugins.VersionMimetype)
|
||||
fmt.Fprintf(w, `{"Implements": ["%s", "%s"]}`, driverapi.NetworkPluginEndpointType, ipamapi.PluginEndpointType)
|
||||
_, err := fmt.Fprintf(w, `{"Implements": ["%s", "%s"]}`, driverapi.NetworkPluginEndpointType, ipamapi.PluginEndpointType)
|
||||
assert.NilError(t, err)
|
||||
})
|
||||
|
||||
// Network driver implementation
|
||||
mux.HandleFunc(fmt.Sprintf("/%s.GetCapabilities", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", plugins.VersionMimetype)
|
||||
fmt.Fprintf(w, `{"Scope":"global"}`)
|
||||
_, err := fmt.Fprint(w, `{"Scope":"global"}`)
|
||||
assert.NilError(t, err)
|
||||
})
|
||||
|
||||
mux.HandleFunc(fmt.Sprintf("/%s.AllocateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -657,12 +660,14 @@ func setupRemoteGlobalNetworkPlugin(c *testing.T, mux *http.ServeMux, url, netDr
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", plugins.VersionMimetype)
|
||||
fmt.Fprintf(w, "null")
|
||||
_, err = fmt.Fprint(w, "null")
|
||||
assert.NilError(t, err)
|
||||
})
|
||||
|
||||
mux.HandleFunc(fmt.Sprintf("/%s.FreeNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", plugins.VersionMimetype)
|
||||
fmt.Fprintf(w, "null")
|
||||
_, err := fmt.Fprint(w, "null")
|
||||
assert.NilError(t, err)
|
||||
})
|
||||
|
||||
mux.HandleFunc(fmt.Sprintf("/%s.CreateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -672,17 +677,20 @@ func setupRemoteGlobalNetworkPlugin(c *testing.T, mux *http.ServeMux, url, netDr
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", plugins.VersionMimetype)
|
||||
fmt.Fprintf(w, "null")
|
||||
_, err = fmt.Fprint(w, "null")
|
||||
assert.NilError(t, err)
|
||||
})
|
||||
|
||||
mux.HandleFunc(fmt.Sprintf("/%s.DeleteNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", plugins.VersionMimetype)
|
||||
fmt.Fprintf(w, "null")
|
||||
_, err := fmt.Fprint(w, "null")
|
||||
assert.NilError(t, err)
|
||||
})
|
||||
|
||||
mux.HandleFunc(fmt.Sprintf("/%s.CreateEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", plugins.VersionMimetype)
|
||||
fmt.Fprintf(w, `{"Interface":{"MacAddress":"a0:b1:c2:d3:e4:f5"}}`)
|
||||
_, err := fmt.Fprint(w, `{"Interface":{"MacAddress":"a0:b1:c2:d3:e4:f5"}}`)
|
||||
assert.NilError(t, err)
|
||||
})
|
||||
|
||||
mux.HandleFunc(fmt.Sprintf("/%s.Join", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -692,23 +700,28 @@ func setupRemoteGlobalNetworkPlugin(c *testing.T, mux *http.ServeMux, url, netDr
|
||||
LinkAttrs: netlink.LinkAttrs{Name: "randomIfName", TxQLen: 0}, PeerName: "cnt0",
|
||||
}
|
||||
if err := netlink.LinkAdd(veth); err != nil {
|
||||
fmt.Fprintf(w, `{"Error":"failed to add veth pair: `+err.Error()+`"}`)
|
||||
_, err = fmt.Fprint(w, `{"Error":"failed to add veth pair: `+err.Error()+`"}`)
|
||||
assert.NilError(t, err)
|
||||
} else {
|
||||
fmt.Fprintf(w, `{"InterfaceName":{ "SrcName":"cnt0", "DstPrefix":"veth"}}`)
|
||||
_, err = fmt.Fprint(w, `{"InterfaceName":{ "SrcName":"cnt0", "DstPrefix":"veth"}}`)
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
})
|
||||
|
||||
mux.HandleFunc(fmt.Sprintf("/%s.Leave", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", plugins.VersionMimetype)
|
||||
fmt.Fprintf(w, "null")
|
||||
_, err := fmt.Fprint(w, "null")
|
||||
assert.NilError(t, err)
|
||||
})
|
||||
|
||||
mux.HandleFunc(fmt.Sprintf("/%s.DeleteEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", plugins.VersionMimetype)
|
||||
if link, err := netlink.LinkByName("cnt0"); err == nil {
|
||||
netlink.LinkDel(link)
|
||||
err := netlink.LinkDel(link)
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
fmt.Fprintf(w, "null")
|
||||
_, err := fmt.Fprint(w, "null")
|
||||
assert.NilError(t, err)
|
||||
})
|
||||
|
||||
// IPAM Driver implementation
|
||||
@@ -717,16 +730,19 @@ func setupRemoteGlobalNetworkPlugin(c *testing.T, mux *http.ServeMux, url, netDr
|
||||
poolReleaseReq remoteipam.ReleasePoolRequest
|
||||
addressRequest remoteipam.RequestAddressRequest
|
||||
addressReleaseReq remoteipam.ReleaseAddressRequest
|
||||
lAS = "localAS"
|
||||
gAS = "globalAS"
|
||||
pool = "172.28.0.0/16"
|
||||
poolID = lAS + "/" + pool
|
||||
gw = "172.28.255.254/16"
|
||||
)
|
||||
const (
|
||||
lAS = "localAS"
|
||||
gAS = "globalAS"
|
||||
pool = "172.28.0.0/16"
|
||||
poolID = lAS + "/" + pool
|
||||
gw = "172.28.255.254/16"
|
||||
)
|
||||
|
||||
mux.HandleFunc(fmt.Sprintf("/%s.GetDefaultAddressSpaces", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", plugins.VersionMimetype)
|
||||
fmt.Fprintf(w, `{"LocalDefaultAddressSpace":"`+lAS+`", "GlobalDefaultAddressSpace": "`+gAS+`"}`)
|
||||
_, err := fmt.Fprint(w, `{"LocalDefaultAddressSpace":"`+lAS+`", "GlobalDefaultAddressSpace": "`+gAS+`"}`)
|
||||
assert.NilError(t, err)
|
||||
})
|
||||
|
||||
mux.HandleFunc(fmt.Sprintf("/%s.RequestPool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -737,11 +753,14 @@ func setupRemoteGlobalNetworkPlugin(c *testing.T, mux *http.ServeMux, url, netDr
|
||||
}
|
||||
w.Header().Set("Content-Type", plugins.VersionMimetype)
|
||||
if poolRequest.AddressSpace != lAS && poolRequest.AddressSpace != gAS {
|
||||
fmt.Fprintf(w, `{"Error":"Unknown address space in pool request: `+poolRequest.AddressSpace+`"}`)
|
||||
_, err := fmt.Fprint(w, `{"Error":"Unknown address space in pool request: `+poolRequest.AddressSpace+`"}`)
|
||||
assert.NilError(t, err)
|
||||
} else if poolRequest.Pool != "" && poolRequest.Pool != pool {
|
||||
fmt.Fprintf(w, `{"Error":"Cannot handle explicit pool requests yet"}`)
|
||||
_, err := fmt.Fprint(w, `{"Error":"Cannot handle explicit pool requests yet"}`)
|
||||
assert.NilError(t, err)
|
||||
} else {
|
||||
fmt.Fprintf(w, `{"PoolID":"`+poolID+`", "Pool":"`+pool+`"}`)
|
||||
_, err := fmt.Fprint(w, `{"PoolID":"`+poolID+`", "Pool":"`+pool+`"}`)
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
})
|
||||
|
||||
@@ -754,11 +773,14 @@ func setupRemoteGlobalNetworkPlugin(c *testing.T, mux *http.ServeMux, url, netDr
|
||||
w.Header().Set("Content-Type", plugins.VersionMimetype)
|
||||
// make sure libnetwork is now querying on the expected pool id
|
||||
if addressRequest.PoolID != poolID {
|
||||
fmt.Fprintf(w, `{"Error":"unknown pool id"}`)
|
||||
_, err := fmt.Fprint(w, `{"Error":"unknown pool id"}`)
|
||||
assert.NilError(t, err)
|
||||
} else if addressRequest.Address != "" {
|
||||
fmt.Fprintf(w, `{"Error":"Cannot handle explicit address requests yet"}`)
|
||||
_, err := fmt.Fprint(w, `{"Error":"Cannot handle explicit address requests yet"}`)
|
||||
assert.NilError(t, err)
|
||||
} else {
|
||||
fmt.Fprintf(w, `{"Address":"`+gw+`"}`)
|
||||
_, err := fmt.Fprint(w, `{"Address":"`+gw+`"}`)
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
})
|
||||
|
||||
@@ -771,11 +793,14 @@ func setupRemoteGlobalNetworkPlugin(c *testing.T, mux *http.ServeMux, url, netDr
|
||||
w.Header().Set("Content-Type", plugins.VersionMimetype)
|
||||
// make sure libnetwork is now asking to release the expected address from the expected poolid
|
||||
if addressRequest.PoolID != poolID {
|
||||
fmt.Fprintf(w, `{"Error":"unknown pool id"}`)
|
||||
_, err := fmt.Fprint(w, `{"Error":"unknown pool id"}`)
|
||||
assert.NilError(t, err)
|
||||
} else if addressReleaseReq.Address != gw {
|
||||
fmt.Fprintf(w, `{"Error":"unknown address"}`)
|
||||
_, err := fmt.Fprint(w, `{"Error":"unknown address"}`)
|
||||
assert.NilError(t, err)
|
||||
} else {
|
||||
fmt.Fprintf(w, "null")
|
||||
_, err := fmt.Fprint(w, "null")
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
})
|
||||
|
||||
@@ -788,22 +813,24 @@ func setupRemoteGlobalNetworkPlugin(c *testing.T, mux *http.ServeMux, url, netDr
|
||||
w.Header().Set("Content-Type", plugins.VersionMimetype)
|
||||
// make sure libnetwork is now asking to release the expected poolid
|
||||
if addressRequest.PoolID != poolID {
|
||||
fmt.Fprintf(w, `{"Error":"unknown pool id"}`)
|
||||
_, err := fmt.Fprint(w, `{"Error":"unknown pool id"}`)
|
||||
assert.NilError(t, err)
|
||||
} else {
|
||||
fmt.Fprintf(w, "null")
|
||||
_, err := fmt.Fprint(w, "null")
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
})
|
||||
|
||||
err := os.MkdirAll("/etc/docker/plugins", 0o755)
|
||||
assert.NilError(c, err)
|
||||
assert.NilError(t, err)
|
||||
|
||||
fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", netDrv)
|
||||
err = os.WriteFile(fileName, []byte(url), 0o644)
|
||||
assert.NilError(c, err)
|
||||
assert.NilError(t, err)
|
||||
|
||||
ipamFileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", ipamDrv)
|
||||
err = os.WriteFile(ipamFileName, []byte(url), 0o644)
|
||||
assert.NilError(c, err)
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
|
||||
func (s *DockerSwarmSuite) TestSwarmNetworkPlugin(c *testing.T) {
|
||||
|
||||
@@ -439,7 +439,7 @@ func pollCheck(t *testing.T, f checkF, compare func(x interface{}) assert.BoolOr
|
||||
default:
|
||||
panic(fmt.Errorf("pollCheck: type %T not implemented", r))
|
||||
}
|
||||
return poll.Continue(comment)
|
||||
return poll.Continue("%v", comment)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -388,6 +388,7 @@ func TestLiveRestore(t *testing.T) {
|
||||
|
||||
t.Run("volume references", testLiveRestoreVolumeReferences)
|
||||
t.Run("autoremove", testLiveRestoreAutoRemove)
|
||||
t.Run("user chains", testLiveRestoreUserChainsSetup)
|
||||
}
|
||||
|
||||
func testLiveRestoreAutoRemove(t *testing.T) {
|
||||
@@ -606,6 +607,34 @@ func testLiveRestoreVolumeReferences(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func testLiveRestoreUserChainsSetup(t *testing.T) {
|
||||
skip.If(t, testEnv.IsRootless(), "rootless daemon uses it's own network namespace")
|
||||
|
||||
t.Parallel()
|
||||
ctx := testutil.StartSpan(baseContext, t)
|
||||
|
||||
t.Run("user chains should be inserted", func(t *testing.T) {
|
||||
d := daemon.New(t)
|
||||
d.StartWithBusybox(ctx, t, "--live-restore")
|
||||
t.Cleanup(func() {
|
||||
d.Stop(t)
|
||||
d.Cleanup(t)
|
||||
})
|
||||
|
||||
c := d.NewClientT(t)
|
||||
|
||||
cID := container.Run(ctx, t, c, container.WithCmd("top"))
|
||||
defer c.ContainerRemove(ctx, cID, containertypes.RemoveOptions{Force: true})
|
||||
|
||||
d.Stop(t)
|
||||
icmd.RunCommand("iptables", "--flush", "FORWARD").Assert(t, icmd.Success)
|
||||
d.Start(t, "--live-restore")
|
||||
|
||||
result := icmd.RunCommand("iptables", "-S", "FORWARD", "1")
|
||||
assert.Check(t, is.Equal(strings.TrimSpace(result.Stdout()), "-A FORWARD -j DOCKER-USER"), "the jump to DOCKER-USER should be the first rule in the FORWARD chain")
|
||||
})
|
||||
}
|
||||
|
||||
func TestDaemonDefaultBridgeWithFixedCidrButNoBip(t *testing.T) {
|
||||
skip.If(t, runtime.GOOS == "windows")
|
||||
|
||||
|
||||
@@ -10,10 +10,12 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/testutil"
|
||||
"github.com/docker/docker/testutil/daemon"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
"gotest.tools/v3/skip"
|
||||
)
|
||||
|
||||
@@ -178,7 +180,8 @@ func TestImportWithCustomPlatformReject(t *testing.T) {
|
||||
reference,
|
||||
types.ImageImportOptions{Platform: tc.platform})
|
||||
|
||||
assert.ErrorContains(t, err, tc.expectedErr)
|
||||
assert.Check(t, is.ErrorType(err, errdefs.IsInvalidParameter))
|
||||
assert.Check(t, is.ErrorContains(err, tc.expectedErr))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,8 +18,6 @@ import (
|
||||
func TestDiskUsage(t *testing.T) {
|
||||
skip.If(t, testEnv.DaemonInfo.OSType == "windows") // d.Start fails on Windows with `protocol not available`
|
||||
|
||||
t.Parallel()
|
||||
|
||||
ctx := testutil.StartSpan(baseContext, t)
|
||||
|
||||
d := daemon.New(t)
|
||||
@@ -38,7 +36,18 @@ func TestDiskUsage(t *testing.T) {
|
||||
next: func(t *testing.T, _ types.DiskUsage) types.DiskUsage {
|
||||
du, err := client.DiskUsage(ctx, types.DiskUsageOptions{})
|
||||
assert.NilError(t, err)
|
||||
|
||||
expectedLayersSize := int64(0)
|
||||
// TODO: Investigate https://github.com/moby/moby/issues/47119
|
||||
// Make 4096 (block size) also a valid value for zero usage.
|
||||
if testEnv.UsingSnapshotter() && testEnv.IsRootless() {
|
||||
if du.LayersSize == 4096 {
|
||||
expectedLayersSize = du.LayersSize
|
||||
}
|
||||
}
|
||||
|
||||
assert.DeepEqual(t, du, types.DiskUsage{
|
||||
LayersSize: expectedLayersSize,
|
||||
Images: []*image.Summary{},
|
||||
Containers: []*types.Container{},
|
||||
Volumes: []*volume.Volume{},
|
||||
|
||||
@@ -199,11 +199,11 @@ func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID {
|
||||
return parent
|
||||
}
|
||||
if parent == "" {
|
||||
return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...)
|
||||
return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...) // #nosec G602 -- slice index out of range, which is a false positive
|
||||
}
|
||||
// H = "H(n-1) SHA256(n)"
|
||||
dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0])))
|
||||
return createChainIDFromParent(ChainID(dgst), dgsts[1:]...)
|
||||
dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) // #nosec G602 -- slice index out of range, which is a false positive
|
||||
return createChainIDFromParent(ChainID(dgst), dgsts[1:]...) // #nosec G602 -- slice index out of range, which is a false positive
|
||||
}
|
||||
|
||||
// ReleaseAndLog releases the provided layer from the given layer
|
||||
|
||||
@@ -302,8 +302,8 @@ func (r *remote) monitorDaemon(ctx context.Context) {
|
||||
r.GRPC.Address,
|
||||
containerd.WithTimeout(60*time.Second),
|
||||
containerd.WithDialOpts([]grpc.DialOption{
|
||||
grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()),
|
||||
grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()),
|
||||
grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()), //nolint:staticcheck // TODO(thaJeztah): ignore SA1019 for deprecated options: see https://github.com/moby/moby/issues/47437
|
||||
grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()), //nolint:staticcheck // TODO(thaJeztah): ignore SA1019 for deprecated options: see https://github.com/moby/moby/issues/47437
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
|
||||
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
|
||||
|
||||
@@ -707,17 +707,24 @@ addToStore:
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// Sets up the DOCKER-USER chain for each iptables version (IPv4, IPv6)
|
||||
// that's enabled in the controller's configuration.
|
||||
for _, ipVersion := range c.enabledIptablesVersions() {
|
||||
if err := setupUserChain(ipVersion); err != nil {
|
||||
log.G(context.TODO()).WithError(err).Warnf("Controller.NewNetwork %s:", name)
|
||||
}
|
||||
if err := c.SetupUserChains(); err != nil {
|
||||
log.G(context.TODO()).WithError(err).Warnf("Controller.NewNetwork %s:", name)
|
||||
}
|
||||
|
||||
return nw, nil
|
||||
}
|
||||
|
||||
// Sets up the DOCKER-USER chain for each iptables version (IPv4, IPv6) that's
|
||||
// enabled in the controller's configuration.
|
||||
func (c *Controller) SetupUserChains() error {
|
||||
for _, ipVersion := range c.enabledIptablesVersions() {
|
||||
if err := setupUserChain(ipVersion); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var joinCluster NetworkWalker = func(nw *Network) bool {
|
||||
if nw.configOnly {
|
||||
return false
|
||||
@@ -1045,7 +1052,7 @@ func (c *Controller) loadDriver(networkType string) error {
|
||||
|
||||
if err != nil {
|
||||
if errors.Cause(err) == plugins.ErrNotFound {
|
||||
return types.NotFoundErrorf(err.Error())
|
||||
return types.NotFoundErrorf("%v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -1064,7 +1071,7 @@ func (c *Controller) loadIPAMDriver(name string) error {
|
||||
|
||||
if err != nil {
|
||||
if errors.Cause(err) == plugins.ErrNotFound {
|
||||
return types.NotFoundErrorf(err.Error())
|
||||
return types.NotFoundErrorf("%v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -373,7 +373,7 @@ func setINC(version iptables.IPVersion, iface string, enable bool) error {
|
||||
log.G(context.TODO()).Warnf("Failed to rollback iptables rule after failure (%v): %v", err, err2)
|
||||
}
|
||||
}
|
||||
return fmt.Errorf(msg)
|
||||
return errors.New(msg)
|
||||
}
|
||||
log.G(context.TODO()).Warn(msg)
|
||||
}
|
||||
|
||||
@@ -3,18 +3,14 @@
|
||||
package libnetwork
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/libnetwork/resolvconf"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
"gotest.tools/v3/skip"
|
||||
)
|
||||
|
||||
func TestDNSOptions(t *testing.T) {
|
||||
skip.If(t, runtime.GOOS == "windows", "test only works on linux")
|
||||
|
||||
c, err := New()
|
||||
assert.NilError(t, err)
|
||||
|
||||
|
||||
@@ -94,7 +94,7 @@ func processReturn(r io.Reader) error {
|
||||
return fmt.Errorf("failed to read buf in processReturn : %v", err)
|
||||
}
|
||||
if string(buf[0:n]) != success {
|
||||
return fmt.Errorf(string(buf[0:n]))
|
||||
return fmt.Errorf("%s", buf[0:n])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
2
man/.gitignore
vendored
Normal file
2
man/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
/man*
|
||||
/.build
|
||||
49
man/Makefile
Normal file
49
man/Makefile
Normal file
@@ -0,0 +1,49 @@
|
||||
prefix = /usr/local
|
||||
mandir = $(prefix)/man
|
||||
INSTALL = install
|
||||
INSTALL_DATA = ${INSTALL} -m 644
|
||||
|
||||
# By default, the man pages are generated using a copy of go-md2man built from
|
||||
# the vendored sources in this directory. This behavior can be overridden by
|
||||
# setting the GO_MD2MAN variable to the name/path of an existing go-md2man
|
||||
# binary.
|
||||
GO_MD2MAN ?= .build/go-md2man
|
||||
|
||||
ALL_PAGES := $(wildcard *.*.md)
|
||||
|
||||
# Determine which manual sections we are generating pages for
|
||||
# by isolating the last part of the filename before the extension
|
||||
# and eliminating duplicates.
|
||||
man_section = $(lastword $(subst ., ,$(1)))
|
||||
sections := $(sort $(foreach page,$(ALL_PAGES:.md=),$(call man_section,$(page))))
|
||||
|
||||
# Dynamically generate pattern rules for each manual section
|
||||
# so make knows how to build a target like man8/dockerd.8.
|
||||
define MANPAGE_template
|
||||
man$(1)/%.$(1): %.$(1).md $(if $(findstring file,$(origin GO_MD2MAN)),$(GO_MD2MAN)) | man$(1)
|
||||
$(GO_MD2MAN) -in $$< -out $$@
|
||||
endef
|
||||
$(foreach sec,$(sections),$(eval $(call MANPAGE_template,$(sec))))
|
||||
|
||||
# Default target: build all man pages.
|
||||
all: $(foreach page,$(ALL_PAGES:.md=),man$(call man_section,$(page))/$(page))
|
||||
|
||||
# Target for creating the man{1..8} directories as needed.
|
||||
.PRECIOUS: man%
|
||||
man%:
|
||||
-mkdir $@
|
||||
|
||||
.PHONY: install
|
||||
install: all
|
||||
@set -ex; \
|
||||
for sec in $(sections); do \
|
||||
$(INSTALL) -d $(DESTDIR)$(mandir)/man$$sec && \
|
||||
$(INSTALL_DATA) man$$sec/* $(DESTDIR)$(mandir)/man$$sec; \
|
||||
done
|
||||
|
||||
.build/go-md2man: go.mod go.sum
|
||||
GO111MODULE=auto go build -o $@ github.com/cpuguy83/go-md2man/v2
|
||||
|
||||
.PHONY: clean
|
||||
clean:
|
||||
rm -r man* .build
|
||||
25
man/README.md
Normal file
25
man/README.md
Normal file
@@ -0,0 +1,25 @@
|
||||
Docker Engine Documentation
|
||||
===========================
|
||||
|
||||
The man pages for Docker Engine are generated from the markdown sources and tooling in this directory.
|
||||
|
||||
## Generate the man pages
|
||||
|
||||
Run `make` from within this directory.
|
||||
A Go toolchain is required.
|
||||
The generated man pages will be placed in man*N* subdirectories, where *N* is the manual section number.
|
||||
|
||||
## Install the man pages
|
||||
|
||||
Run `make install` from within this directory.
|
||||
The make variables `prefix`, `mandir`, `INSTALL`, `INSTALL_DATA` and `DESTDIR`
|
||||
are supported for customizing the installation.
|
||||
|
||||
## Add a new man page
|
||||
|
||||
Create a new Markdown file in this directory with a filename *TITLE*.*SECTION*.md,
|
||||
where *TITLE* is the man page title and *SECTION* is the section number.
|
||||
The Makefile will pick it up automatically.
|
||||
|
||||
The Makefile ignores Markdown files that do not match the glob `*.*.md`,
|
||||
allowing non-manpage documentation (like this README file) to coexist.
|
||||
505
man/dockerd.8.md
Normal file
505
man/dockerd.8.md
Normal file
@@ -0,0 +1,505 @@
|
||||
% "DOCKERD" "8" "SEPTEMBER 2015" "Docker Community" "Docker User Manuals"
|
||||
|
||||
# NAME
|
||||
dockerd - Enable daemon mode
|
||||
|
||||
# SYNOPSIS
|
||||
**dockerd**
|
||||
[**--add-runtime**[=*[]*]]
|
||||
[**--allow-nondistributable-artifacts**[=*[]*]]
|
||||
[**--api-cors-header**=[=*API-CORS-HEADER*]]
|
||||
[**--authorization-plugin**[=*[]*]]
|
||||
[**-b**|**--bridge**[=*BRIDGE*]]
|
||||
[**--bip**[=*BIP*]]
|
||||
[**--cgroup-parent**[=*[]*]]
|
||||
[**--config-file**[=*path*]]
|
||||
[**--containerd**[=*SOCKET-PATH*]]
|
||||
[**--data-root**[=*/var/lib/docker*]]
|
||||
[**-D**|**--debug**]
|
||||
[**--default-cgroupns-mode**[=*host*]]
|
||||
[**--default-gateway**[=*DEFAULT-GATEWAY*]]
|
||||
[**--default-gateway-v6**[=*DEFAULT-GATEWAY-V6*]]
|
||||
[**--default-address-pool**[=*DEFAULT-ADDRESS-POOL*]]
|
||||
[**--default-network-opt**[=*DRIVER=OPT=VALUE*]]
|
||||
[**--default-runtime**[=*runc*]]
|
||||
[**--default-ipc-mode**=*MODE*]
|
||||
[**--default-shm-size**[=*64MiB*]]
|
||||
[**--default-ulimit**[=*[]*]]
|
||||
[**--dns**[=*[]*]]
|
||||
[**--dns-opt**[=*[]*]]
|
||||
[**--dns-search**[=*[]*]]
|
||||
[**--exec-opt**[=*[]*]]
|
||||
[**--exec-root**[=*/var/run/docker*]]
|
||||
[**--experimental**[=**false**]]
|
||||
[**--fixed-cidr**[=*FIXED-CIDR*]]
|
||||
[**--fixed-cidr-v6**[=*FIXED-CIDR-V6*]]
|
||||
[**-G**|**--group**[=*docker*]]
|
||||
[**-H**|**--host**[=*[]*]]
|
||||
[**--help**]
|
||||
[**--http-proxy**[*""*]]
|
||||
[**--https-proxy**[*""*]]
|
||||
[**--icc**[=**true**]]
|
||||
[**--init**[=**false**]]
|
||||
[**--init-path**[=*""*]]
|
||||
[**--insecure-registry**[=*[]*]]
|
||||
[**--ip**[=*0.0.0.0*]]
|
||||
[**--ip-forward**[=**true**]]
|
||||
[**--ip-masq**[=**true**]]
|
||||
[**--iptables**[=**true**]]
|
||||
[**--ipv6**]
|
||||
[**--isolation**[=*default*]]
|
||||
[**-l**|**--log-level**[=*info*]]
|
||||
[**--label**[=*[]*]]
|
||||
[**--live-restore**[=**false**]]
|
||||
[**--log-driver**[=*json-file*]]
|
||||
[**--log-format**="*text*|*json*"]
|
||||
[**--log-opt**[=*map[]*]]
|
||||
[**--mtu**[=*0*]]
|
||||
[**--max-concurrent-downloads**[=*3*]]
|
||||
[**--max-concurrent-uploads**[=*5*]]
|
||||
[**--max-download-attempts**[=*5*]]
|
||||
[**--no-proxy**[*""*]]
|
||||
[**--node-generic-resources**[=*[]*]]
|
||||
[**-p**|**--pidfile**[=*/var/run/docker.pid*]]
|
||||
[**--raw-logs**]
|
||||
[**--registry-mirror**[=*[]*]]
|
||||
[**-s**|**--storage-driver**[=*STORAGE-DRIVER*]]
|
||||
[**--seccomp-profile**[=*SECCOMP-PROFILE-PATH*]]
|
||||
[**--selinux-enabled**]
|
||||
[**--shutdown-timeout**[=*15*]]
|
||||
[**--storage-opt**[=*[]*]]
|
||||
[**--swarm-default-advertise-addr**[=*IP|INTERFACE*]]
|
||||
[**--tls**]
|
||||
[**--tlscacert**[=*~/.docker/ca.pem*]]
|
||||
[**--tlscert**[=*~/.docker/cert.pem*]]
|
||||
[**--tlskey**[=*~/.docker/key.pem*]]
|
||||
[**--tlsverify**]
|
||||
[**--userland-proxy**[=**true**]]
|
||||
[**--userland-proxy-path**[=*""*]]
|
||||
[**--userns-remap**[=*default*]]
|
||||
[**--validate**]
|
||||
|
||||
# DESCRIPTION
|
||||
**dockerd** is used for starting the Docker daemon (i.e., to command the daemon
|
||||
to manage images, containers etc). So **dockerd** is a server, as a daemon.
|
||||
|
||||
To run the Docker daemon you can specify **dockerd**.
|
||||
You can check the daemon options using **dockerd --help**.
|
||||
Daemon options should be specified after the **dockerd** keyword in the
|
||||
following format.
|
||||
|
||||
**dockerd [OPTIONS]**
|
||||
|
||||
# OPTIONS
|
||||
|
||||
**--add-runtime**=[]
|
||||
Runtimes can be registered with the daemon either via the
|
||||
configuration file or using the `--add-runtime` command line argument.
|
||||
|
||||
The following is an example adding 2 runtimes via the configuration:
|
||||
|
||||
```json
|
||||
{
|
||||
"default-runtime": "runc",
|
||||
"runtimes": {
|
||||
"runc": {
|
||||
"path": "runc"
|
||||
},
|
||||
"custom": {
|
||||
"path": "/usr/local/bin/my-runc-replacement",
|
||||
"runtimeArgs": [
|
||||
"--debug"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This is the same example via the command line:
|
||||
|
||||
```bash
|
||||
$ sudo dockerd --add-runtime runc=runc --add-runtime custom=/usr/local/bin/my-runc-replacement
|
||||
```
|
||||
|
||||
**Note**: defining runtime arguments via the command line is not supported.
|
||||
|
||||
**--allow-nondistributable-artifacts**=[]
|
||||
Push nondistributable artifacts to the specified registries.
|
||||
|
||||
List can contain elements with CIDR notation to specify a whole subnet.
|
||||
|
||||
This option is useful when pushing images containing nondistributable
|
||||
artifacts to a registry on an air-gapped network so hosts on that network can
|
||||
pull the images without connecting to another server.
|
||||
|
||||
**Warning**: Nondistributable artifacts typically have restrictions on how
|
||||
and where they can be distributed and shared. Only use this feature to push
|
||||
artifacts to private registries and ensure that you are in compliance with
|
||||
any terms that cover redistributing nondistributable artifacts.
|
||||
|
||||
**--api-cors-header**=""
|
||||
Set CORS headers in the Engine API. Default is cors disabled. Give urls like
|
||||
"http://foo, http://bar, ...". Give "\*" to allow all.
|
||||
|
||||
**--authorization-plugin**=""
|
||||
Set authorization plugins to load
|
||||
|
||||
**-b**, **--bridge**=""
|
||||
Attach containers to a pre\-existing network bridge; use 'none' to disable
|
||||
container networking
|
||||
|
||||
**--bip**=""
|
||||
Use the provided CIDR notation address for the dynamically created bridge
|
||||
(docker0); Mutually exclusive of \-b
|
||||
|
||||
**--cgroup-parent**=""
|
||||
Set parent cgroup for all containers. Default is "/docker" for fs cgroup
|
||||
driver and "system.slice" for systemd cgroup driver.
|
||||
|
||||
**--config-file**=*"/etc/docker/daemon.json"*
|
||||
Specifies the JSON file path to load the configuration from. Default is
|
||||
*/etc/docker/daemon.json*.
|
||||
|
||||
**--containerd**=""
|
||||
Path to containerd socket.
|
||||
|
||||
**--data-root**=""
|
||||
Path to the directory used to store persisted Docker data such as
|
||||
configuration for resources, swarm cluster state, and filesystem data for
|
||||
images, containers, and local volumes. Default is */var/lib/docker*.
|
||||
|
||||
**-D**, **--debug**=**true**|**false**
|
||||
Enable debug mode. Default is **false**.
|
||||
|
||||
**--default-cgroupns-mode**="**host**|**private**"
|
||||
Set the default cgroup namespace mode for newly created containers. The argument
|
||||
can either be **host** or **private**. If unset, this defaults to **host** on cgroup v1,
|
||||
or **private** on cgroup v2.
|
||||
|
||||
**--default-gateway**=""
|
||||
IPv4 address of the container default gateway; this address must be part of
|
||||
the bridge subnet (which is defined by \-b or \-\-bip)
|
||||
|
||||
**--default-gateway-v6**=""
|
||||
IPv6 address of the container default gateway
|
||||
|
||||
**--default-address-pool**=""
|
||||
Default address pool from which IPAM driver selects a subnet for the networks.
|
||||
Example: base=172.30.0.0/16,size=24 will set the default
|
||||
address pools for the selected scope networks to {172.30.[0-255].0/24}
|
||||
|
||||
**--default-network-opt**=*DRIVER=OPT=VALUE*
|
||||
Default network driver options
|
||||
|
||||
**--default-runtime**=*"runtime"*
|
||||
Set default runtime if there're more than one specified by **--add-runtime**.
|
||||
|
||||
**--default-ipc-mode**="**private**|**shareable**"
|
||||
Set the default IPC mode for newly created containers. The argument
|
||||
can either be **private** or **shareable**.
|
||||
|
||||
**--default-shm-size**=*size*
|
||||
Set the daemon-wide default shm *size* for containers. Default is `64MiB`.
|
||||
|
||||
**--default-ulimit**=[]
|
||||
Default ulimits for containers.
|
||||
|
||||
**--dns**=""
|
||||
Force Docker to use specific DNS servers.
|
||||
|
||||
**--dns-opt**=""
|
||||
DNS options to use.
|
||||
|
||||
**--dns-search**=[]
|
||||
DNS search domains to use.
|
||||
|
||||
**--exec-opt**=[]
|
||||
Set runtime execution options. See RUNTIME EXECUTION OPTIONS.
|
||||
|
||||
**--exec-root**=""
|
||||
Path to use as the root of the Docker execution state files. Default is
|
||||
`/var/run/docker`.
|
||||
|
||||
**--experimental**=""
|
||||
Enable the daemon experimental features.
|
||||
|
||||
**--fixed-cidr**=""
|
||||
IPv4 subnet for fixed IPs (e.g., 10.20.0.0/16); this subnet must be nested in
|
||||
the bridge subnet (which is defined by \-b or \-\-bip).
|
||||
|
||||
**--fixed-cidr-v6**=""
|
||||
IPv6 subnet for global IPv6 addresses (e.g., 2a00:1450::/64)
|
||||
|
||||
**-G**, **--group**=""
|
||||
Group to assign the unix socket specified by -H when running in daemon mode.
|
||||
use '' (the empty string) to disable setting of a group. Default is `docker`.
|
||||
|
||||
**-H**, **--host**=[*unix:///var/run/docker.sock*]: tcp://[host:port] to bind or
|
||||
unix://[/path/to/socket] to use.
|
||||
The socket(s) to bind to in daemon mode specified using one or more
|
||||
tcp://host:port, unix:///path/to/socket, fd://\* or fd://socketfd.
|
||||
|
||||
**--help**
|
||||
Print usage statement
|
||||
|
||||
**--http-proxy***""*
|
||||
Proxy URL for HTTP requests unless overridden by NoProxy.
|
||||
|
||||
**--https-proxy***""*
|
||||
Proxy URL for HTTPS requests unless overridden by NoProxy.
|
||||
|
||||
**--icc**=**true**|**false**
|
||||
Allow unrestricted inter\-container and Docker daemon host communication. If
|
||||
disabled, containers can still be linked together using the **--link** option
|
||||
(see **docker-run**(1)). Default is **true**.
|
||||
|
||||
**--init**
|
||||
Run an init process inside containers for signal forwarding and process
|
||||
reaping.
|
||||
|
||||
**--init-path**
|
||||
Path to the docker-init binary.
|
||||
|
||||
**--insecure-registry**=[]
|
||||
Enable insecure registry communication, i.e., enable un-encrypted and/or
|
||||
untrusted communication.
|
||||
|
||||
List of insecure registries can contain an element with CIDR notation to
|
||||
specify a whole subnet. Insecure registries accept HTTP and/or accept HTTPS
|
||||
with certificates from unknown CAs.
|
||||
|
||||
Enabling `--insecure-registry` is useful when running a local registry.
|
||||
However, because its use creates security vulnerabilities it should ONLY be
|
||||
enabled for testing purposes. For increased security, users should add their
|
||||
CA to their system's list of trusted CAs instead of using
|
||||
`--insecure-registry`.
|
||||
|
||||
**--ip**=""
|
||||
Default IP address to use when binding container ports. Default is **0.0.0.0**.
|
||||
|
||||
**--ip-forward**=**true**|**false**
|
||||
Enables IP forwarding on the Docker host. The default is **true**. This flag
|
||||
interacts with the IP forwarding setting on your host system's kernel. If
|
||||
your system has IP forwarding disabled, this setting enables it. If your
|
||||
system has IP forwarding enabled, setting this flag to **false**
|
||||
has no effect.
|
||||
|
||||
This setting will also enable IPv6 forwarding if you have both
|
||||
**--ip-forward=true** and **--fixed-cidr-v6** set. Note that this may reject
|
||||
Router Advertisements and interfere with the host's existing IPv6
|
||||
configuration. For more information, consult the documentation about
|
||||
"Advanced Networking - IPv6".
|
||||
|
||||
**--ip-masq**=**true**|**false**
|
||||
Enable IP masquerading for bridge's IP range. Default is **true**.
|
||||
|
||||
**--iptables**=**true**|**false**
|
||||
Enable Docker's addition of iptables rules. Default is **true**.
|
||||
|
||||
**--ipv6**=**true**|**false**
|
||||
Enable IPv6 support. Default is **false**. Docker will create an IPv6-enabled
|
||||
bridge with address fe80::1 which will allow you to create IPv6-enabled
|
||||
containers. Use together with **--fixed-cidr-v6** to provide globally routable
|
||||
IPv6 addresses. IPv6 forwarding will be enabled if not used with
|
||||
**--ip-forward=false**. This may collide with your host's current IPv6
|
||||
settings. For more information consult the documentation about
|
||||
"Advanced Networking - IPv6".
|
||||
|
||||
**--isolation**="*default*"
|
||||
Isolation specifies the type of isolation technology used by containers.
|
||||
Note that the default on Windows server is `process`, and the default on
|
||||
Windows client is `hyperv`. Linux only supports `default`.
|
||||
|
||||
**-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*"
|
||||
Set the logging level. Default is `info`.
|
||||
|
||||
**--label**="[]"
|
||||
Set key=value labels to the daemon (displayed in `docker info`)
|
||||
|
||||
**--live-restore**=**false**
|
||||
Enable live restore of running containers when the daemon starts so that they
|
||||
are not restarted. This option is applicable only for docker daemon running
|
||||
on Linux host.
|
||||
|
||||
**--log-driver**="**json-file**|**syslog**|**journald**|**gelf**|**fluentd**|**awslogs**|**splunk**|**etwlogs**|**gcplogs**|**none**"
|
||||
Default driver for container logs. Default is **json-file**.
|
||||
**Warning**: **docker logs** command works only for **json-file** logging driver.
|
||||
|
||||
**--log-format**="*text*|*json*"
|
||||
Set the format for logs produced by the daemon. Default is "text".
|
||||
|
||||
**--log-opt**=[]
|
||||
Logging driver specific options.
|
||||
|
||||
**--mtu**=*0*
|
||||
Set the containers network mtu. Default is `0`.
|
||||
|
||||
**--max-concurrent-downloads**=*3*
|
||||
Set the max concurrent downloads. Default is `3`.
|
||||
|
||||
**--max-concurrent-uploads**=*5*
|
||||
Set the max concurrent uploads. Default is `5`.
|
||||
|
||||
**--max-download-attempts**=*5*
|
||||
Set the max download attempts for each pull. Default is `5`.
|
||||
|
||||
**--no-proxy**=*""*"
|
||||
Comma-separated values specifying hosts that should be excluded from proxying.
|
||||
|
||||
**--node-generic-resources**=*[]*
|
||||
Advertise user-defined resource. Default is `[]`.
|
||||
Use this if your swarm cluster has some nodes with custom
|
||||
resources (e.g: NVIDIA GPU, SSD, ...) and you need your services to land on
|
||||
nodes advertising these resources.
|
||||
Usage example: `--node-generic-resources "NVIDIA-GPU=UUID1"
|
||||
--node-generic-resources "NVIDIA-GPU=UUID2"`
|
||||
|
||||
|
||||
**-p**, **--pidfile**="*path*"
|
||||
Path to use for daemon PID file. Default is */var/run/docker.pid*.
|
||||
|
||||
**--raw-logs**
|
||||
Output daemon logs in full timestamp format without ANSI coloring. If this
|
||||
flag is not set, the daemon outputs condensed, colorized logs if a terminal
|
||||
is detected, or full ("raw") output otherwise.
|
||||
|
||||
**--registry-mirror**=*<scheme>://<host>*
|
||||
Prepend a registry mirror to be used for image pulls. May be specified
|
||||
multiple times.
|
||||
|
||||
**-s**, **--storage-driver**=""
|
||||
Force the Docker runtime to use a specific storage driver.
|
||||
|
||||
**--seccomp-profile**=""
|
||||
Path to seccomp profile.
|
||||
|
||||
**--selinux-enabled**=**true**|**false**
|
||||
Enable selinux support. Default is **false**.
|
||||
|
||||
**--shutdown-timeout**=*seconds*
|
||||
Set the shutdown timeout value in seconds. Default is **15**.
|
||||
|
||||
**--storage-opt**=[]
|
||||
Set storage driver options. See STORAGE DRIVER OPTIONS.
|
||||
|
||||
**--swarm-default-advertise-addr**=*IP*|*INTERFACE*
|
||||
Set default address or interface for swarm to advertise as its
|
||||
externally-reachable address to other cluster members. This can be a
|
||||
hostname, an IP address, or an interface such as `eth0`. A port cannot be
|
||||
specified with this option.
|
||||
|
||||
**--tls**=**true**|**false**
|
||||
Use TLS; implied by **--tlsverify**. Default is **false**.
|
||||
|
||||
**--tlscacert**=*~/.docker/ca.pem*
|
||||
Trust certs signed only by this CA.
|
||||
|
||||
**--tlscert**=*~/.docker/cert.pem*
|
||||
Path to TLS certificate file.
|
||||
|
||||
**--tlskey**=*~/.docker/key.pem*
|
||||
Path to TLS key file.
|
||||
|
||||
**--tlsverify**=**true**|**false**
|
||||
Use TLS and verify the remote (daemon: verify client, client: verify daemon).
|
||||
Default is **false**.
|
||||
|
||||
**--userland-proxy**=**true**|**false**
|
||||
Rely on a userland proxy implementation for inter-container and
|
||||
outside-to-container loopback communications. Default is **true**.
|
||||
|
||||
**--userland-proxy-path**=""
|
||||
Path to the userland proxy binary.
|
||||
|
||||
**--userns-remap**=*default*|*uid:gid*|*user:group*|*user*|*uid*
|
||||
Enable user namespaces for containers on the daemon. Specifying "default"
|
||||
will cause a new user and group to be created to handle UID and GID range
|
||||
remapping for the user namespace mappings used for contained processes.
|
||||
Specifying a user (or uid) and optionally a group (or gid) will cause the
|
||||
daemon to lookup the user and group's subordinate ID ranges for use as the
|
||||
user namespace mappings for contained processes.
|
||||
|
||||
**--validate**
|
||||
Validate daemon configuration and exit.
|
||||
|
||||
# STORAGE DRIVER OPTIONS
|
||||
|
||||
Docker uses storage backends (known as "storage drivers" in the Docker
|
||||
internals) to create writable containers from images. Many of these
|
||||
backends use operating system level technologies and can be
|
||||
configured.
|
||||
|
||||
Specify options to the storage backend with **--storage-opt** flags. The
|
||||
backends that currently take options are **zfs** and **btrfs**.
|
||||
Options for **zfs** start with **zfs.**, and options for **btrfs** start
|
||||
with **btrfs.**.
|
||||
|
||||
## ZFS options
|
||||
|
||||
#### zfs.fsname
|
||||
|
||||
Set zfs filesystem under which docker will create its own datasets. By default
|
||||
docker will pick up the zfs filesystem where docker graph (`/var/lib/docker`)
|
||||
is located.
|
||||
|
||||
Example use: `dockerd -s zfs --storage-opt zfs.fsname=zroot/docker`
|
||||
|
||||
## Btrfs options
|
||||
|
||||
#### btrfs.min\_space
|
||||
|
||||
Specifies the minimum size to use when creating the subvolume which is used for
|
||||
containers. If user uses disk quota for btrfs when creating or running a
|
||||
container with **--storage-opt size** option, docker should ensure the **size**
|
||||
cannot be smaller than **btrfs.min_space**.
|
||||
|
||||
Example use: `docker daemon -s btrfs --storage-opt btrfs.min_space=10G`
|
||||
|
||||
# Access authorization
|
||||
|
||||
Docker's access authorization can be extended by authorization plugins that
|
||||
your organization can purchase or build themselves. You can install one or more
|
||||
authorization plugins when you start the Docker `daemon` using the
|
||||
`--authorization-plugin=PLUGIN_ID` option.
|
||||
|
||||
```bash
|
||||
dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,...
|
||||
```
|
||||
|
||||
The `PLUGIN_ID` value is either the plugin's name or a path to its
|
||||
specification file. The plugin's implementation determines whether you can
|
||||
specify a name or path. Consult with your Docker administrator to get
|
||||
information about the plugins available to you.
|
||||
|
||||
Once a plugin is installed, requests made to the `daemon` through the
|
||||
command line or Docker's Engine API are allowed or denied by the plugin.
|
||||
If you have multiple plugins installed, each plugin, in order, must
|
||||
allow the request for it to complete.
|
||||
|
||||
For information about how to create an authorization plugin, see [access authorization
|
||||
plugin](https://docs.docker.com/engine/extend/plugins_authorization/) section in the
|
||||
Docker extend section of this documentation.
|
||||
|
||||
# RUNTIME EXECUTION OPTIONS
|
||||
|
||||
You can configure the runtime using options specified with the `--exec-opt` flag.
|
||||
All the flag's options have the `native` prefix. A single `native.cgroupdriver`
|
||||
option is available.
|
||||
|
||||
The `native.cgroupdriver` option specifies the management of the container's
|
||||
cgroups. You can only specify `cgroupfs` or `systemd`. If you specify
|
||||
`systemd` and it is not available, the system errors out. If you omit the
|
||||
`native.cgroupdriver` option,` cgroupfs` is used on cgroup v1 hosts, `systemd`
|
||||
is used on cgroup v2 hosts with systemd available.
|
||||
|
||||
This example sets the `cgroupdriver` to `systemd`:
|
||||
|
||||
```bash
|
||||
$ sudo dockerd --exec-opt native.cgroupdriver=systemd
|
||||
```
|
||||
|
||||
Setting this option applies to all containers the daemon launches.
|
||||
|
||||
# HISTORY
|
||||
Sept 2015, Originally compiled by Shishir Mahajan <shishir.mahajan@redhat.com>
|
||||
based on docker.com source material and internal work.
|
||||
7
man/go.mod
Normal file
7
man/go.mod
Normal file
@@ -0,0 +1,7 @@
|
||||
module github.com/docker/docker/man
|
||||
|
||||
go 1.19
|
||||
|
||||
require github.com/cpuguy83/go-md2man/v2 v2.0.4
|
||||
|
||||
require github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
4
man/go.sum
Normal file
4
man/go.sum
Normal file
@@ -0,0 +1,4 @@
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
7
man/tools.go
Normal file
7
man/tools.go
Normal file
@@ -0,0 +1,7 @@
|
||||
//go:build tools
|
||||
|
||||
package man
|
||||
|
||||
import (
|
||||
_ "github.com/cpuguy83/go-md2man/v2"
|
||||
)
|
||||
2
man/vendor/github.com/cpuguy83/go-md2man/v2/.gitignore
generated
vendored
Normal file
2
man/vendor/github.com/cpuguy83/go-md2man/v2/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
go-md2man
|
||||
bin
|
||||
6
man/vendor/github.com/cpuguy83/go-md2man/v2/.golangci.yml
generated
vendored
Normal file
6
man/vendor/github.com/cpuguy83/go-md2man/v2/.golangci.yml
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
# For documentation, see https://golangci-lint.run/usage/configuration/
|
||||
|
||||
linters:
|
||||
enable:
|
||||
- gofumpt
|
||||
|
||||
14
man/vendor/github.com/cpuguy83/go-md2man/v2/Dockerfile
generated
vendored
Normal file
14
man/vendor/github.com/cpuguy83/go-md2man/v2/Dockerfile
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
ARG GO_VERSION=1.21
|
||||
|
||||
FROM --platform=${BUILDPLATFORM} golang:${GO_VERSION} AS build
|
||||
COPY . /go/src/github.com/cpuguy83/go-md2man
|
||||
WORKDIR /go/src/github.com/cpuguy83/go-md2man
|
||||
ARG TARGETOS TARGETARCH TARGETVARIANT
|
||||
RUN \
|
||||
--mount=type=cache,target=/go/pkg/mod \
|
||||
--mount=type=cache,target=/root/.cache/go-build \
|
||||
make build
|
||||
|
||||
FROM scratch
|
||||
COPY --from=build /go/src/github.com/cpuguy83/go-md2man/bin/go-md2man /go-md2man
|
||||
ENTRYPOINT ["/go-md2man"]
|
||||
21 man/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md generated vendored Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2014 Brian Goff

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
49 man/vendor/github.com/cpuguy83/go-md2man/v2/Makefile generated vendored Normal file
@@ -0,0 +1,49 @@
GO111MODULE ?= on

export GO111MODULE

GOOS ?= $(if $(TARGETOS),$(TARGETOS),)
GOARCH ?= $(if $(TARGETARCH),$(TARGETARCH),)

ifeq ($(TARGETARCH),amd64)
	GOAMD64 ?= $(TARGETVARIANT)
endif

ifeq ($(TARGETARCH),arm)
	GOARM ?= $(TARGETVARIANT:v%=%)
endif

ifneq ($(GOOS),)
export GOOS
endif

ifneq ($(GOARCH),)
export GOARCH
endif

ifneq ($(GOAMD64),)
export GOAMD64
endif

ifneq ($(GOARM),)
export GOARM
endif

.PHONY:
build: bin/go-md2man

.PHONY: clean
clean:
	@rm -rf bin/*

.PHONY: test
test:
	@go test $(TEST_FLAGS) ./...

bin/go-md2man: go.mod go.sum md2man/* *.go
	@mkdir -p bin
	CGO_ENABLED=0 go build $(BUILD_FLAGS) -o $@

.PHONY: mod
mod:
	@go mod tidy
15 man/vendor/github.com/cpuguy83/go-md2man/v2/README.md generated vendored Normal file
@@ -0,0 +1,15 @@
go-md2man
=========

Converts markdown into roff (man pages).

Uses blackfriday to process markdown into man pages.

### Usage

./md2man -in /path/to/markdownfile.md -out /manfile/output/path

### How to contribute

We use go modules to manage dependencies.
As such you must be using at least go1.11.
28 man/vendor/github.com/cpuguy83/go-md2man/v2/go-md2man.1.md generated vendored Normal file
@@ -0,0 +1,28 @@
go-md2man 1 "January 2015" go-md2man "User Manual"
==================================================

# NAME
go-md2man - Convert markdown files into manpages

# SYNOPSIS
**go-md2man** [**-in**=*/path/to/md/file*] [**-out**=*/path/to/output*]

# DESCRIPTION
**go-md2man** converts standard markdown formatted documents into manpages. It is
written purely in Go so as to reduce dependencies on 3rd party libs.

By default, the input is stdin and the output is stdout.

# EXAMPLES
Convert the markdown file *go-md2man.1.md* into a manpage:
```
go-md2man < go-md2man.1.md > go-md2man.1
```

Same, but using command line arguments instead of shell redirection:
```
go-md2man -in=go-md2man.1.md -out=go-md2man.1
```

# HISTORY
January 2015, Originally compiled by Brian Goff (cpuguy83@gmail.com).
53 man/vendor/github.com/cpuguy83/go-md2man/v2/md2man.go generated vendored Normal file
@@ -0,0 +1,53 @@
package main

import (
	"flag"
	"fmt"
	"io/ioutil"
	"os"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

var (
	inFilePath  = flag.String("in", "", "Path to file to be processed (default: stdin)")
	outFilePath = flag.String("out", "", "Path to output processed file (default: stdout)")
)

func main() {
	var err error
	flag.Parse()

	inFile := os.Stdin
	if *inFilePath != "" {
		inFile, err = os.Open(*inFilePath)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
	}
	defer inFile.Close() // nolint: errcheck

	doc, err := ioutil.ReadAll(inFile)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	out := md2man.Render(doc)

	outFile := os.Stdout
	if *outFilePath != "" {
		outFile, err = os.Create(*outFilePath)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		defer outFile.Close() // nolint: errcheck
	}
	_, err = outFile.Write(out)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
16 man/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go generated vendored Normal file
@@ -0,0 +1,16 @@
package md2man

import (
	"github.com/russross/blackfriday/v2"
)

// Render converts a markdown document into a roff formatted document.
func Render(doc []byte) []byte {
	renderer := NewRoffRenderer()

	return blackfriday.Run(doc,
		[]blackfriday.Option{
			blackfriday.WithRenderer(renderer),
			blackfriday.WithExtensions(renderer.GetExtensions()),
		}...)
}
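For reference, a minimal sketch of calling the vendored renderer from Go; md2man.Render is the function shown above, while the input document here is purely illustrative:

```go
package main

import (
	"os"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	// Render a tiny markdown man-page source to roff and print it. The title
	// line follows the "name section date ..." convention used by
	// go-md2man.1.md above; the content is only an example.
	src := []byte("example 1 \"January 2025\" example \"User Manual\"\n" +
		"=================================================\n\n" +
		"# NAME\nexample - a demo page\n")
	os.Stdout.Write(md2man.Render(src))
}
```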
382 man/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go generated vendored Normal file
@@ -0,0 +1,382 @@
|
||||
package md2man
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/russross/blackfriday/v2"
|
||||
)
|
||||
|
||||
// roffRenderer implements the blackfriday.Renderer interface for creating
|
||||
// roff format (manpages) from markdown text
|
||||
type roffRenderer struct {
|
||||
extensions blackfriday.Extensions
|
||||
listCounters []int
|
||||
firstHeader bool
|
||||
firstDD bool
|
||||
listDepth int
|
||||
}
|
||||
|
||||
const (
|
||||
titleHeader = ".TH "
|
||||
topLevelHeader = "\n\n.SH "
|
||||
secondLevelHdr = "\n.SH "
|
||||
otherHeader = "\n.SS "
|
||||
crTag = "\n"
|
||||
emphTag = "\\fI"
|
||||
emphCloseTag = "\\fP"
|
||||
strongTag = "\\fB"
|
||||
strongCloseTag = "\\fP"
|
||||
breakTag = "\n.br\n"
|
||||
paraTag = "\n.PP\n"
|
||||
hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n"
|
||||
linkTag = "\n\\[la]"
|
||||
linkCloseTag = "\\[ra]"
|
||||
codespanTag = "\\fB"
|
||||
codespanCloseTag = "\\fR"
|
||||
codeTag = "\n.EX\n"
|
||||
codeCloseTag = ".EE\n" // Do not prepend a newline character since code blocks, by definition, include a newline already (or at least as blackfriday gives us one).
|
||||
quoteTag = "\n.PP\n.RS\n"
|
||||
quoteCloseTag = "\n.RE\n"
|
||||
listTag = "\n.RS\n"
|
||||
listCloseTag = "\n.RE\n"
|
||||
dtTag = "\n.TP\n"
|
||||
dd2Tag = "\n"
|
||||
tableStart = "\n.TS\nallbox;\n"
|
||||
tableEnd = ".TE\n"
|
||||
tableCellStart = "T{\n"
|
||||
tableCellEnd = "\nT}\n"
|
||||
tablePreprocessor = `'\" t`
|
||||
)
|
||||
|
||||
// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
|
||||
// from markdown
|
||||
func NewRoffRenderer() *roffRenderer { // nolint: golint
|
||||
var extensions blackfriday.Extensions
|
||||
|
||||
extensions |= blackfriday.NoIntraEmphasis
|
||||
extensions |= blackfriday.Tables
|
||||
extensions |= blackfriday.FencedCode
|
||||
extensions |= blackfriday.SpaceHeadings
|
||||
extensions |= blackfriday.Footnotes
|
||||
extensions |= blackfriday.Titleblock
|
||||
extensions |= blackfriday.DefinitionLists
|
||||
return &roffRenderer{
|
||||
extensions: extensions,
|
||||
}
|
||||
}
|
||||
|
||||
// GetExtensions returns the list of extensions used by this renderer implementation
|
||||
func (r *roffRenderer) GetExtensions() blackfriday.Extensions {
|
||||
return r.extensions
|
||||
}
|
||||
|
||||
// RenderHeader handles outputting the header at document start
|
||||
func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {
|
||||
// We need to walk the tree to check if there are any tables.
|
||||
// If there are, we need to enable the roff table preprocessor.
|
||||
ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
|
||||
if node.Type == blackfriday.Table {
|
||||
out(w, tablePreprocessor+"\n")
|
||||
return blackfriday.Terminate
|
||||
}
|
||||
return blackfriday.GoToNext
|
||||
})
|
||||
|
||||
// disable hyphenation
|
||||
out(w, ".nh\n")
|
||||
}
|
||||
|
||||
// RenderFooter handles outputting the footer at the document end; the roff
|
||||
// renderer has no footer information
|
||||
func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {
|
||||
}
|
||||
|
||||
// RenderNode is called for each node in a markdown document; based on the node
|
||||
// type the equivalent roff output is sent to the writer
|
||||
func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
|
||||
walkAction := blackfriday.GoToNext
|
||||
|
||||
switch node.Type {
|
||||
case blackfriday.Text:
|
||||
escapeSpecialChars(w, node.Literal)
|
||||
case blackfriday.Softbreak:
|
||||
out(w, crTag)
|
||||
case blackfriday.Hardbreak:
|
||||
out(w, breakTag)
|
||||
case blackfriday.Emph:
|
||||
if entering {
|
||||
out(w, emphTag)
|
||||
} else {
|
||||
out(w, emphCloseTag)
|
||||
}
|
||||
case blackfriday.Strong:
|
||||
if entering {
|
||||
out(w, strongTag)
|
||||
} else {
|
||||
out(w, strongCloseTag)
|
||||
}
|
||||
case blackfriday.Link:
|
||||
// Don't render the link text for automatic links, because this
|
||||
// will only duplicate the URL in the roff output.
|
||||
// See https://daringfireball.net/projects/markdown/syntax#autolink
|
||||
if !bytes.Equal(node.LinkData.Destination, node.FirstChild.Literal) {
|
||||
out(w, string(node.FirstChild.Literal))
|
||||
}
|
||||
// Hyphens in a link must be escaped to avoid word-wrap in the rendered man page.
|
||||
escapedLink := strings.ReplaceAll(string(node.LinkData.Destination), "-", "\\-")
|
||||
out(w, linkTag+escapedLink+linkCloseTag)
|
||||
walkAction = blackfriday.SkipChildren
|
||||
case blackfriday.Image:
|
||||
// ignore images
|
||||
walkAction = blackfriday.SkipChildren
|
||||
case blackfriday.Code:
|
||||
out(w, codespanTag)
|
||||
escapeSpecialChars(w, node.Literal)
|
||||
out(w, codespanCloseTag)
|
||||
case blackfriday.Document:
|
||||
break
|
||||
case blackfriday.Paragraph:
|
||||
// roff .PP markers break lists
|
||||
if r.listDepth > 0 {
|
||||
return blackfriday.GoToNext
|
||||
}
|
||||
if entering {
|
||||
out(w, paraTag)
|
||||
} else {
|
||||
out(w, crTag)
|
||||
}
|
||||
case blackfriday.BlockQuote:
|
||||
if entering {
|
||||
out(w, quoteTag)
|
||||
} else {
|
||||
out(w, quoteCloseTag)
|
||||
}
|
||||
case blackfriday.Heading:
|
||||
r.handleHeading(w, node, entering)
|
||||
case blackfriday.HorizontalRule:
|
||||
out(w, hruleTag)
|
||||
case blackfriday.List:
|
||||
r.handleList(w, node, entering)
|
||||
case blackfriday.Item:
|
||||
r.handleItem(w, node, entering)
|
||||
case blackfriday.CodeBlock:
|
||||
out(w, codeTag)
|
||||
escapeSpecialChars(w, node.Literal)
|
||||
out(w, codeCloseTag)
|
||||
case blackfriday.Table:
|
||||
r.handleTable(w, node, entering)
|
||||
case blackfriday.TableHead:
|
||||
case blackfriday.TableBody:
|
||||
case blackfriday.TableRow:
|
||||
// no action as cell entries do all the nroff formatting
|
||||
return blackfriday.GoToNext
|
||||
case blackfriday.TableCell:
|
||||
r.handleTableCell(w, node, entering)
|
||||
case blackfriday.HTMLSpan:
|
||||
// ignore other HTML tags
|
||||
case blackfriday.HTMLBlock:
|
||||
if bytes.HasPrefix(node.Literal, []byte("<!--")) {
|
||||
break // ignore comments, no warning
|
||||
}
|
||||
fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
|
||||
default:
|
||||
fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
|
||||
}
|
||||
return walkAction
|
||||
}
|
||||
|
||||
func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||
if entering {
|
||||
switch node.Level {
|
||||
case 1:
|
||||
if !r.firstHeader {
|
||||
out(w, titleHeader)
|
||||
r.firstHeader = true
|
||||
break
|
||||
}
|
||||
out(w, topLevelHeader)
|
||||
case 2:
|
||||
out(w, secondLevelHdr)
|
||||
default:
|
||||
out(w, otherHeader)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||
openTag := listTag
|
||||
closeTag := listCloseTag
|
||||
if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
|
||||
// tags for definition lists handled within Item node
|
||||
openTag = ""
|
||||
closeTag = ""
|
||||
}
|
||||
if entering {
|
||||
r.listDepth++
|
||||
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
|
||||
r.listCounters = append(r.listCounters, 1)
|
||||
}
|
||||
out(w, openTag)
|
||||
} else {
|
||||
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
|
||||
r.listCounters = r.listCounters[:len(r.listCounters)-1]
|
||||
}
|
||||
out(w, closeTag)
|
||||
r.listDepth--
|
||||
}
|
||||
}
|
||||
|
||||
func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||
if entering {
|
||||
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
|
||||
out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1]))
|
||||
r.listCounters[len(r.listCounters)-1]++
|
||||
} else if node.ListFlags&blackfriday.ListTypeTerm != 0 {
|
||||
// DT (definition term): line just before DD (see below).
|
||||
out(w, dtTag)
|
||||
r.firstDD = true
|
||||
} else if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
|
||||
// DD (definition description): line that starts with ": ".
|
||||
//
|
||||
// We have to distinguish between the first DD and the
|
||||
// subsequent ones, as there should be no vertical
|
||||
// whitespace between the DT and the first DD.
|
||||
if r.firstDD {
|
||||
r.firstDD = false
|
||||
} else {
|
||||
out(w, dd2Tag)
|
||||
}
|
||||
} else {
|
||||
out(w, ".IP \\(bu 2\n")
|
||||
}
|
||||
} else {
|
||||
out(w, "\n")
|
||||
}
|
||||
}
|
||||
|
||||
func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||
if entering {
|
||||
out(w, tableStart)
|
||||
// call walker to count cells (and rows?) so format section can be produced
|
||||
columns := countColumns(node)
|
||||
out(w, strings.Repeat("l ", columns)+"\n")
|
||||
out(w, strings.Repeat("l ", columns)+".\n")
|
||||
} else {
|
||||
out(w, tableEnd)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||
if entering {
|
||||
var start string
|
||||
if node.Prev != nil && node.Prev.Type == blackfriday.TableCell {
|
||||
start = "\t"
|
||||
}
|
||||
if node.IsHeader {
|
||||
start += strongTag
|
||||
} else if nodeLiteralSize(node) > 30 {
|
||||
start += tableCellStart
|
||||
}
|
||||
out(w, start)
|
||||
} else {
|
||||
var end string
|
||||
if node.IsHeader {
|
||||
end = strongCloseTag
|
||||
} else if nodeLiteralSize(node) > 30 {
|
||||
end = tableCellEnd
|
||||
}
|
||||
if node.Next == nil && end != tableCellEnd {
|
||||
// Last cell: need to carriage return if we are at the end of the
|
||||
// header row and content isn't wrapped in a "tablecell"
|
||||
end += crTag
|
||||
}
|
||||
out(w, end)
|
||||
}
|
||||
}
|
||||
|
||||
func nodeLiteralSize(node *blackfriday.Node) int {
|
||||
total := 0
|
||||
for n := node.FirstChild; n != nil; n = n.FirstChild {
|
||||
total += len(n.Literal)
|
||||
}
|
||||
return total
|
||||
}
|
||||
|
||||
// because roff format requires knowing the column count before outputting any table
|
||||
// data we need to walk a table tree and count the columns
|
||||
func countColumns(node *blackfriday.Node) int {
|
||||
var columns int
|
||||
|
||||
node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
|
||||
switch node.Type {
|
||||
case blackfriday.TableRow:
|
||||
if !entering {
|
||||
return blackfriday.Terminate
|
||||
}
|
||||
case blackfriday.TableCell:
|
||||
if entering {
|
||||
columns++
|
||||
}
|
||||
default:
|
||||
}
|
||||
return blackfriday.GoToNext
|
||||
})
|
||||
return columns
|
||||
}
|
||||
|
||||
func out(w io.Writer, output string) {
|
||||
io.WriteString(w, output) // nolint: errcheck
|
||||
}
|
||||
|
||||
func escapeSpecialChars(w io.Writer, text []byte) {
|
||||
scanner := bufio.NewScanner(bytes.NewReader(text))
|
||||
|
||||
// count the number of lines in the text
|
||||
// we need to know this to avoid adding a newline after the last line
|
||||
n := bytes.Count(text, []byte{'\n'})
|
||||
idx := 0
|
||||
|
||||
for scanner.Scan() {
|
||||
dt := scanner.Bytes()
|
||||
if idx < n {
|
||||
idx++
|
||||
dt = append(dt, '\n')
|
||||
}
|
||||
escapeSpecialCharsLine(w, dt)
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func escapeSpecialCharsLine(w io.Writer, text []byte) {
|
||||
for i := 0; i < len(text); i++ {
|
||||
// escape initial apostrophe or period
|
||||
if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') {
|
||||
out(w, "\\&")
|
||||
}
|
||||
|
||||
// directly copy normal characters
|
||||
org := i
|
||||
|
||||
for i < len(text) && text[i] != '\\' {
|
||||
i++
|
||||
}
|
||||
if i > org {
|
||||
w.Write(text[org:i]) // nolint: errcheck
|
||||
}
|
||||
|
||||
// escape a character
|
||||
if i >= len(text) {
|
||||
break
|
||||
}
|
||||
|
||||
w.Write([]byte{'\\', text[i]}) // nolint: errcheck
|
||||
}
|
||||
}
|
||||
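As a hedged illustration of the definition-list handling in roff.go above (a term line followed by a line starting with ": " becomes a .TP block via handleItem); the option name below is made up and not taken from this diff:

```go
package main

import (
	"os"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	// The DefinitionLists extension is enabled by NewRoffRenderer, so this
	// input is rendered as a .TP term/description pair in the roff output.
	doc := []byte("# OPTIONS\n**--debug**\n: Enable debug output (hypothetical option).\n")
	os.Stdout.Write(md2man.Render(doc))
}
```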
8 man/vendor/github.com/russross/blackfriday/v2/.gitignore generated vendored Normal file
@@ -0,0 +1,8 @@
*.out
*.swp
*.8
*.6
_obj
_test*
markdown
tags
17 man/vendor/github.com/russross/blackfriday/v2/.travis.yml generated vendored Normal file
@@ -0,0 +1,17 @@
sudo: false
language: go
go:
  - "1.10.x"
  - "1.11.x"
  - tip
matrix:
  fast_finish: true
  allow_failures:
    - go: tip
install:
  - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
script:
  - go get -t -v ./...
  - diff -u <(echo -n) <(gofmt -d -s .)
  - go tool vet .
  - go test -v ./...
29 man/vendor/github.com/russross/blackfriday/v2/LICENSE.txt generated vendored Normal file
@@ -0,0 +1,29 @@
Blackfriday is distributed under the Simplified BSD License:

> Copyright © 2011 Russ Ross
> All rights reserved.
>
> Redistribution and use in source and binary forms, with or without
> modification, are permitted provided that the following conditions
> are met:
>
> 1.  Redistributions of source code must retain the above copyright
>     notice, this list of conditions and the following disclaimer.
>
> 2.  Redistributions in binary form must reproduce the above
>     copyright notice, this list of conditions and the following
>     disclaimer in the documentation and/or other materials provided with
>     the distribution.
>
> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
> POSSIBILITY OF SUCH DAMAGE.
335 man/vendor/github.com/russross/blackfriday/v2/README.md generated vendored Normal file
@@ -0,0 +1,335 @@
|
||||
Blackfriday
|
||||
[![Build Status][BuildV2SVG]][BuildV2URL]
|
||||
[![PkgGoDev][PkgGoDevV2SVG]][PkgGoDevV2URL]
|
||||
===========
|
||||
|
||||
Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It
|
||||
is paranoid about its input (so you can safely feed it user-supplied
|
||||
data), it is fast, it supports common extensions (tables, smart
|
||||
punctuation substitutions, etc.), and it is safe for all utf-8
|
||||
(unicode) input.
|
||||
|
||||
HTML output is currently supported, along with Smartypants
|
||||
extensions.
|
||||
|
||||
It started as a translation from C of [Sundown][3].
|
||||
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
Blackfriday is compatible with modern Go releases in module mode.
|
||||
With Go installed:
|
||||
|
||||
go get github.com/russross/blackfriday/v2
|
||||
|
||||
will resolve and add the package to the current development module,
|
||||
then build and install it. Alternatively, you can achieve the same
|
||||
if you import it in a package:
|
||||
|
||||
import "github.com/russross/blackfriday/v2"
|
||||
|
||||
and `go get` without parameters.
|
||||
|
||||
Legacy GOPATH mode is unsupported.
|
||||
|
||||
|
||||
Versions
|
||||
--------
|
||||
|
||||
Currently maintained and recommended version of Blackfriday is `v2`. It's being
|
||||
developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the
|
||||
documentation is available at
|
||||
https://pkg.go.dev/github.com/russross/blackfriday/v2.
|
||||
|
||||
It is `go get`-able in module mode at `github.com/russross/blackfriday/v2`.
|
||||
|
||||
Version 2 offers a number of improvements over v1:
|
||||
|
||||
* Cleaned up API
|
||||
* A separate call to [`Parse`][4], which produces an abstract syntax tree for
|
||||
the document
|
||||
* Latest bug fixes
|
||||
* Flexibility to easily add your own rendering extensions
|
||||
|
||||
Potential drawbacks:
|
||||
|
||||
* Our benchmarks show v2 to be slightly slower than v1. Currently in the
|
||||
ballpark of around 15%.
|
||||
* API breakage. If you can't afford modifying your code to adhere to the new API
|
||||
and don't care too much about the new features, v2 is probably not for you.
|
||||
* Several bug fixes are trailing behind and still need to be forward-ported to
|
||||
v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for
|
||||
tracking.
|
||||
|
||||
If you are still interested in the legacy `v1`, you can import it from
|
||||
`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found
|
||||
here: https://pkg.go.dev/github.com/russross/blackfriday.
|
||||
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
For the most sensible markdown processing, it is as simple as getting your input
|
||||
into a byte slice and calling:
|
||||
|
||||
```go
|
||||
output := blackfriday.Run(input)
|
||||
```
|
||||
|
||||
Your input will be parsed and the output rendered with a set of most popular
|
||||
extensions enabled. If you want the most basic feature set, corresponding with
|
||||
the bare Markdown specification, use:
|
||||
|
||||
```go
|
||||
output := blackfriday.Run(input, blackfriday.WithNoExtensions())
|
||||
```
|
||||
|
||||
### Sanitize untrusted content
|
||||
|
||||
Blackfriday itself does nothing to protect against malicious content. If you are
|
||||
dealing with user-supplied markdown, we recommend running Blackfriday's output
|
||||
through HTML sanitizer such as [Bluemonday][5].
|
||||
|
||||
Here's an example of simple usage of Blackfriday together with Bluemonday:
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/microcosm-cc/bluemonday"
|
||||
"github.com/russross/blackfriday/v2"
|
||||
)
|
||||
|
||||
// ...
|
||||
unsafe := blackfriday.Run(input)
|
||||
html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
|
||||
```
|
||||
|
||||
### Custom options
|
||||
|
||||
If you want to customize the set of options, use `blackfriday.WithExtensions`,
|
||||
`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`.
|
||||
|
||||
### `blackfriday-tool`
|
||||
|
||||
You can also check out `blackfriday-tool` for a more complete example
|
||||
of how to use it. Download and install it using:
|
||||
|
||||
go get github.com/russross/blackfriday-tool
|
||||
|
||||
This is a simple command-line tool that allows you to process a
|
||||
markdown file using a standalone program. You can also browse the
|
||||
source directly on github if you are just looking for some example
|
||||
code:
|
||||
|
||||
* <https://github.com/russross/blackfriday-tool>
|
||||
|
||||
Note that if you have not already done so, installing
|
||||
`blackfriday-tool` will be sufficient to download and install
|
||||
blackfriday in addition to the tool itself. The tool binary will be
|
||||
installed in `$GOPATH/bin`. This is a statically-linked binary that
|
||||
can be copied to wherever you need it without worrying about
|
||||
dependencies and library versions.
|
||||
|
||||
### Sanitized anchor names
|
||||
|
||||
Blackfriday includes an algorithm for creating sanitized anchor names
|
||||
corresponding to a given input text. This algorithm is used to create
|
||||
anchors for headings when `AutoHeadingIDs` extension is enabled. The
|
||||
algorithm has a specification, so that other packages can create
|
||||
compatible anchor names and links to those anchors.
|
||||
|
||||
The specification is located at https://pkg.go.dev/github.com/russross/blackfriday/v2#hdr-Sanitized_Anchor_Names.
|
||||
|
||||
[`SanitizedAnchorName`](https://pkg.go.dev/github.com/russross/blackfriday/v2#SanitizedAnchorName) exposes this functionality, and can be used to
|
||||
create compatible links to the anchor names generated by blackfriday.
|
||||
This algorithm is also implemented in a small standalone package at
|
||||
[`github.com/shurcooL/sanitized_anchor_name`](https://pkg.go.dev/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients
|
||||
that want a small package and don't need full functionality of blackfriday.
|
||||
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
All features of Sundown are supported, including:
|
||||
|
||||
* **Compatibility**. The Markdown v1.0.3 test suite passes with
|
||||
the `--tidy` option. Without `--tidy`, the differences are
|
||||
mostly in whitespace and entity escaping, where blackfriday is
|
||||
more consistent and cleaner.
|
||||
|
||||
* **Common extensions**, including table support, fenced code
|
||||
blocks, autolinks, strikethroughs, non-strict emphasis, etc.
|
||||
|
||||
* **Safety**. Blackfriday is paranoid when parsing, making it safe
|
||||
to feed untrusted user input without fear of bad things
|
||||
happening. The test suite stress tests this and there are no
|
||||
known inputs that make it crash. If you find one, please let me
|
||||
know and send me the input that does it.
|
||||
|
||||
NOTE: "safety" in this context means *runtime safety only*. In order to
|
||||
protect yourself against JavaScript injection in untrusted content, see
|
||||
[this example](https://github.com/russross/blackfriday#sanitize-untrusted-content).
|
||||
|
||||
* **Fast processing**. It is fast enough to render on-demand in
|
||||
most web applications without having to cache the output.
|
||||
|
||||
* **Thread safety**. You can run multiple parsers in different
|
||||
goroutines without ill effect. There is no dependence on global
|
||||
shared state.
|
||||
|
||||
* **Minimal dependencies**. Blackfriday only depends on standard
|
||||
library packages in Go. The source code is pretty
|
||||
self-contained, so it is easy to add to any project, including
|
||||
Google App Engine projects.
|
||||
|
||||
* **Standards compliant**. Output successfully validates using the
|
||||
W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
|
||||
|
||||
|
||||
Extensions
|
||||
----------
|
||||
|
||||
In addition to the standard markdown syntax, this package
|
||||
implements the following extensions:
|
||||
|
||||
* **Intra-word emphasis suppression**. The `_` character is
|
||||
commonly used inside words when discussing code, so having
|
||||
markdown interpret it as an emphasis command is usually the
|
||||
wrong thing. Blackfriday lets you treat all emphasis markers as
|
||||
normal characters when they occur inside a word.
|
||||
|
||||
* **Tables**. Tables can be created by drawing them in the input
|
||||
using a simple syntax:
|
||||
|
||||
```
|
||||
Name | Age
|
||||
--------|------
|
||||
Bob | 27
|
||||
Alice | 23
|
||||
```
|
||||
|
||||
* **Fenced code blocks**. In addition to the normal 4-space
|
||||
indentation to mark code blocks, you can explicitly mark them
|
||||
and supply a language (to make syntax highlighting simple). Just
|
||||
mark it like this:
|
||||
|
||||
```go
|
||||
func getTrue() bool {
|
||||
return true
|
||||
}
|
||||
```
|
||||
|
||||
You can use 3 or more backticks to mark the beginning of the
|
||||
block, and the same number to mark the end of the block.
|
||||
|
||||
To preserve classes of fenced code blocks while using the bluemonday
|
||||
HTML sanitizer, use the following policy:
|
||||
|
||||
```go
|
||||
p := bluemonday.UGCPolicy()
|
||||
p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code")
|
||||
html := p.SanitizeBytes(unsafe)
|
||||
```
|
||||
|
||||
* **Definition lists**. A simple definition list is made of a single-line
|
||||
term followed by a colon and the definition for that term.
|
||||
|
||||
Cat
|
||||
: Fluffy animal everyone likes
|
||||
|
||||
Internet
|
||||
: Vector of transmission for pictures of cats
|
||||
|
||||
Terms must be separated from the previous definition by a blank line.
|
||||
|
||||
* **Footnotes**. A marker in the text that will become a superscript number;
|
||||
a footnote definition that will be placed in a list of footnotes at the
|
||||
end of the document. A footnote looks like this:
|
||||
|
||||
This is a footnote.[^1]
|
||||
|
||||
[^1]: the footnote text.
|
||||
|
||||
* **Autolinking**. Blackfriday can find URLs that have not been
|
||||
explicitly marked as links and turn them into links.
|
||||
|
||||
* **Strikethrough**. Use two tildes (`~~`) to mark text that
|
||||
should be crossed out.
|
||||
|
||||
* **Hard line breaks**. With this extension enabled newlines in the input
|
||||
translate into line breaks in the output. This extension is off by default.
|
||||
|
||||
* **Smart quotes**. Smartypants-style punctuation substitution is
|
||||
supported, turning normal double- and single-quote marks into
|
||||
curly quotes, etc.
|
||||
|
||||
* **LaTeX-style dash parsing** is an additional option, where `--`
|
||||
is translated into `–`, and `---` is translated into
|
||||
`—`. This differs from most smartypants processors, which
|
||||
turn a single hyphen into an ndash and a double hyphen into an
|
||||
mdash.
|
||||
|
||||
* **Smart fractions**, where anything that looks like a fraction
|
||||
is translated into suitable HTML (instead of just a few special
|
||||
cases like most smartypant processors). For example, `4/5`
|
||||
becomes `<sup>4</sup>⁄<sub>5</sub>`, which renders as
|
||||
<sup>4</sup>⁄<sub>5</sub>.
|
||||
|
||||
|
||||
Other renderers
|
||||
---------------
|
||||
|
||||
Blackfriday is structured to allow alternative rendering engines. Here
|
||||
are a few of note:
|
||||
|
||||
* [github_flavored_markdown](https://pkg.go.dev/github.com/shurcooL/github_flavored_markdown):
|
||||
provides a GitHub Flavored Markdown renderer with fenced code block
|
||||
highlighting, clickable heading anchor links.
|
||||
|
||||
It's not customizable, and its goal is to produce HTML output
|
||||
equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode),
|
||||
except the rendering is performed locally.
|
||||
|
||||
* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
|
||||
but for markdown.
|
||||
|
||||
* [LaTeX output](https://gitlab.com/ambrevar/blackfriday-latex):
|
||||
renders output as LaTeX.
|
||||
|
||||
* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience
|
||||
integration with the [Chroma](https://github.com/alecthomas/chroma) code
|
||||
highlighting library. bfchroma is only compatible with v2 of Blackfriday and
|
||||
provides a drop-in renderer ready to use with Blackfriday, as well as
|
||||
options and means for further customization.
|
||||
|
||||
* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer.
|
||||
|
||||
* [Blackfriday-Slack](https://github.com/karriereat/blackfriday-slack): converts markdown to slack message style
|
||||
|
||||
|
||||
TODO
|
||||
----
|
||||
|
||||
* More unit testing
|
||||
* Improve Unicode support. It does not understand all Unicode
|
||||
rules (about what constitutes a letter, a punctuation symbol,
|
||||
etc.), so it may fail to detect word boundaries correctly in
|
||||
some instances. It is safe on all UTF-8 input.
|
||||
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt)
|
||||
|
||||
|
||||
[1]: https://daringfireball.net/projects/markdown/ "Markdown"
|
||||
[2]: https://golang.org/ "Go Language"
|
||||
[3]: https://github.com/vmg/sundown "Sundown"
|
||||
[4]: https://pkg.go.dev/github.com/russross/blackfriday/v2#Parse "Parse func"
|
||||
[5]: https://github.com/microcosm-cc/bluemonday "Bluemonday"
|
||||
|
||||
[BuildV2SVG]: https://travis-ci.org/russross/blackfriday.svg?branch=v2
|
||||
[BuildV2URL]: https://travis-ci.org/russross/blackfriday
|
||||
[PkgGoDevV2SVG]: https://pkg.go.dev/badge/github.com/russross/blackfriday/v2
|
||||
[PkgGoDevV2URL]: https://pkg.go.dev/github.com/russross/blackfriday/v2
|
||||
1612 man/vendor/github.com/russross/blackfriday/v2/block.go generated vendored Normal file
File diff suppressed because it is too large
46 man/vendor/github.com/russross/blackfriday/v2/doc.go generated vendored Normal file
@@ -0,0 +1,46 @@
// Package blackfriday is a markdown processor.
//
// It translates plain text with simple formatting rules into an AST, which can
// then be further processed to HTML (provided by Blackfriday itself) or other
// formats (provided by the community).
//
// The simplest way to invoke Blackfriday is to call the Run function. It will
// take a text input and produce a text output in HTML (or other format).
//
// A slightly more sophisticated way to use Blackfriday is to create a Markdown
// processor and to call Parse, which returns a syntax tree for the input
// document. You can leverage Blackfriday's parsing for content extraction from
// markdown documents. You can assign a custom renderer and set various options
// to the Markdown processor.
//
// If you're interested in calling Blackfriday from command line, see
// https://github.com/russross/blackfriday-tool.
//
// Sanitized Anchor Names
//
// Blackfriday includes an algorithm for creating sanitized anchor names
// corresponding to a given input text. This algorithm is used to create
// anchors for headings when AutoHeadingIDs extension is enabled. The
// algorithm is specified below, so that other packages can create
// compatible anchor names and links to those anchors.
//
// The algorithm iterates over the input text, interpreted as UTF-8,
// one Unicode code point (rune) at a time. All runes that are letters (category L)
// or numbers (category N) are considered valid characters. They are mapped to
// lower case, and included in the output. All other runes are considered
// invalid characters. Invalid characters that precede the first valid character,
// as well as invalid characters that follow the last valid character
// are dropped completely. All other sequences of invalid characters
// between two valid characters are replaced with a single dash character '-'.
//
// SanitizedAnchorName exposes this functionality, and can be used to
// create compatible links to the anchor names generated by blackfriday.
// This algorithm is also implemented in a small standalone package at
// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients
// that want a small package and don't need full functionality of blackfriday.
package blackfriday

// NOTE: Keep Sanitized Anchor Name algorithm in sync with package
// github.com/shurcooL/sanitized_anchor_name.
// Otherwise, users of sanitized_anchor_name will get anchor names
// that are incompatible with those generated by blackfriday.
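A minimal sketch of the anchor-name algorithm documented above, using the SanitizedAnchorName function that doc.go refers to; the expected output in the comment follows the stated rules:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Letters and numbers are lower-cased and kept; runs of other characters
	// between them collapse to a single '-'; leading and trailing invalid
	// characters are dropped.
	fmt.Println(blackfriday.SanitizedAnchorName("  This is a Header!  "))
	// Expected: this-is-a-header
}
```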
2236 man/vendor/github.com/russross/blackfriday/v2/entities.go generated vendored Normal file
File diff suppressed because it is too large
70 man/vendor/github.com/russross/blackfriday/v2/esc.go generated vendored Normal file
@@ -0,0 +1,70 @@
package blackfriday

import (
	"html"
	"io"
)

var htmlEscaper = [256][]byte{
	'&': []byte("&amp;"),
	'<': []byte("&lt;"),
	'>': []byte("&gt;"),
	'"': []byte("&quot;"),
}

func escapeHTML(w io.Writer, s []byte) {
	escapeEntities(w, s, false)
}

func escapeAllHTML(w io.Writer, s []byte) {
	escapeEntities(w, s, true)
}

func escapeEntities(w io.Writer, s []byte, escapeValidEntities bool) {
	var start, end int
	for end < len(s) {
		escSeq := htmlEscaper[s[end]]
		if escSeq != nil {
			isEntity, entityEnd := nodeIsEntity(s, end)
			if isEntity && !escapeValidEntities {
				w.Write(s[start : entityEnd+1])
				start = entityEnd + 1
			} else {
				w.Write(s[start:end])
				w.Write(escSeq)
				start = end + 1
			}
		}
		end++
	}
	if start < len(s) && end <= len(s) {
		w.Write(s[start:end])
	}
}

func nodeIsEntity(s []byte, end int) (isEntity bool, endEntityPos int) {
	isEntity = false
	endEntityPos = end + 1

	if s[end] == '&' {
		for endEntityPos < len(s) {
			if s[endEntityPos] == ';' {
				if entities[string(s[end:endEntityPos+1])] {
					isEntity = true
					break
				}
			}
			if !isalnum(s[endEntityPos]) && s[endEntityPos] != '&' && s[endEntityPos] != '#' {
				break
			}
			endEntityPos++
		}
	}

	return isEntity, endEntityPos
}

func escLink(w io.Writer, text []byte) {
	unesc := html.UnescapeString(string(text))
	escapeHTML(w, []byte(unesc))
}
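A hedged sketch of how the escaping above is exercised through the public API (not part of the diff); blackfriday.Run is the entry point named in doc.go, and the expected output in the comment is approximate:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Text nodes are written through escapeHTML above, so markup-significant
	// bytes in the input come out as HTML entities in the rendered output.
	out := blackfriday.Run([]byte("AT&T claims 1 < 2"))
	fmt.Printf("%s", out) // expected, roughly: <p>AT&amp;T claims 1 &lt; 2</p>
}
```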
952 man/vendor/github.com/russross/blackfriday/v2/html.go generated vendored Normal file
@@ -0,0 +1,952 @@
|
||||
//
|
||||
// Blackfriday Markdown Processor
|
||||
// Available at http://github.com/russross/blackfriday
|
||||
//
|
||||
// Copyright © 2011 Russ Ross <russ@russross.com>.
|
||||
// Distributed under the Simplified BSD License.
|
||||
// See README.md for details.
|
||||
//
|
||||
|
||||
//
|
||||
//
|
||||
// HTML rendering backend
|
||||
//
|
||||
//
|
||||
|
||||
package blackfriday
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// HTMLFlags control optional behavior of HTML renderer.
|
||||
type HTMLFlags int
|
||||
|
||||
// HTML renderer configuration options.
|
||||
const (
|
||||
HTMLFlagsNone HTMLFlags = 0
|
||||
SkipHTML HTMLFlags = 1 << iota // Skip preformatted HTML blocks
|
||||
SkipImages // Skip embedded images
|
||||
SkipLinks // Skip all links
|
||||
Safelink // Only link to trusted protocols
|
||||
NofollowLinks // Only link with rel="nofollow"
|
||||
NoreferrerLinks // Only link with rel="noreferrer"
|
||||
NoopenerLinks // Only link with rel="noopener"
|
||||
HrefTargetBlank // Add a blank target
|
||||
CompletePage // Generate a complete HTML page
|
||||
UseXHTML // Generate XHTML output instead of HTML
|
||||
FootnoteReturnLinks // Generate a link at the end of a footnote to return to the source
|
||||
Smartypants // Enable smart punctuation substitutions
|
||||
SmartypantsFractions // Enable smart fractions (with Smartypants)
|
||||
SmartypantsDashes // Enable smart dashes (with Smartypants)
|
||||
SmartypantsLatexDashes // Enable LaTeX-style dashes (with Smartypants)
|
||||
SmartypantsAngledQuotes // Enable angled double quotes (with Smartypants) for double quotes rendering
|
||||
SmartypantsQuotesNBSP // Enable « French guillemets » (with Smartypants)
|
||||
TOC // Generate a table of contents
|
||||
)
|
||||
|
||||
var (
|
||||
htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag)
|
||||
)
|
||||
|
||||
const (
|
||||
htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" +
|
||||
processingInstruction + "|" + declaration + "|" + cdata + ")"
|
||||
closeTag = "</" + tagName + "\\s*[>]"
|
||||
openTag = "<" + tagName + attribute + "*" + "\\s*/?>"
|
||||
attribute = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)"
|
||||
attributeValue = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")"
|
||||
attributeValueSpec = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")"
|
||||
attributeName = "[a-zA-Z_:][a-zA-Z0-9:._-]*"
|
||||
cdata = "<!\\[CDATA\\[[\\s\\S]*?\\]\\]>"
|
||||
declaration = "<![A-Z]+" + "\\s+[^>]*>"
|
||||
doubleQuotedValue = "\"[^\"]*\""
|
||||
htmlComment = "<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->"
|
||||
processingInstruction = "[<][?].*?[?][>]"
|
||||
singleQuotedValue = "'[^']*'"
|
||||
tagName = "[A-Za-z][A-Za-z0-9-]*"
|
||||
unquotedValue = "[^\"'=<>`\\x00-\\x20]+"
|
||||
)
|
||||
|
||||
// HTMLRendererParameters is a collection of supplementary parameters tweaking
|
||||
// the behavior of various parts of HTML renderer.
|
||||
type HTMLRendererParameters struct {
|
||||
// Prepend this text to each relative URL.
|
||||
AbsolutePrefix string
|
||||
// Add this text to each footnote anchor, to ensure uniqueness.
|
||||
FootnoteAnchorPrefix string
|
||||
// Show this text inside the <a> tag for a footnote return link, if the
|
||||
// HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string
|
||||
// <sup>[return]</sup> is used.
|
||||
FootnoteReturnLinkContents string
|
||||
// If set, add this text to the front of each Heading ID, to ensure
|
||||
// uniqueness.
|
||||
HeadingIDPrefix string
|
||||
// If set, add this text to the back of each Heading ID, to ensure uniqueness.
|
||||
HeadingIDSuffix string
|
||||
// Increase heading levels: if the offset is 1, <h1> becomes <h2> etc.
|
||||
// Negative offset is also valid.
|
||||
// Resulting levels are clipped between 1 and 6.
|
||||
HeadingLevelOffset int
|
||||
|
||||
Title string // Document title (used if CompletePage is set)
|
||||
CSS string // Optional CSS file URL (used if CompletePage is set)
|
||||
Icon string // Optional icon file URL (used if CompletePage is set)
|
||||
|
||||
Flags HTMLFlags // Flags allow customizing this renderer's behavior
|
||||
}
|
||||
|
||||
// HTMLRenderer is a type that implements the Renderer interface for HTML output.
|
||||
//
|
||||
// Do not create this directly, instead use the NewHTMLRenderer function.
|
||||
type HTMLRenderer struct {
|
||||
HTMLRendererParameters
|
||||
|
||||
closeTag string // how to end singleton tags: either " />" or ">"
|
||||
|
||||
// Track heading IDs to prevent ID collision in a single generation.
|
||||
headingIDs map[string]int
|
||||
|
||||
lastOutputLen int
|
||||
disableTags int
|
||||
|
||||
sr *SPRenderer
|
||||
}
|
||||
|
||||
const (
|
||||
xhtmlClose = " />"
|
||||
htmlClose = ">"
|
||||
)
|
||||
|
||||
// NewHTMLRenderer creates and configures an HTMLRenderer object, which
|
||||
// satisfies the Renderer interface.
|
||||
func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer {
|
||||
// configure the rendering engine
|
||||
closeTag := htmlClose
|
||||
if params.Flags&UseXHTML != 0 {
|
||||
closeTag = xhtmlClose
|
||||
}
|
||||
|
||||
if params.FootnoteReturnLinkContents == "" {
|
||||
// U+FE0E is VARIATION SELECTOR-15.
|
||||
// It suppresses automatic emoji presentation of the preceding
|
||||
// U+21A9 LEFTWARDS ARROW WITH HOOK on iOS and iPadOS.
|
||||
params.FootnoteReturnLinkContents = "<span aria-label='Return'>↩\ufe0e</span>"
|
||||
}
|
||||
|
||||
return &HTMLRenderer{
|
||||
HTMLRendererParameters: params,
|
||||
|
||||
closeTag: closeTag,
|
||||
headingIDs: make(map[string]int),
|
||||
|
||||
sr: NewSmartypantsRenderer(params.Flags),
|
||||
}
|
||||
}
|
||||
|
||||
func isHTMLTag(tag []byte, tagname string) bool {
|
||||
found, _ := findHTMLTagPos(tag, tagname)
|
||||
return found
|
||||
}
|
||||
|
||||
// Look for a character, but ignore it when it's in any kind of quotes, it
|
||||
// might be JavaScript
|
||||
func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
|
||||
inSingleQuote := false
|
||||
inDoubleQuote := false
|
||||
inGraveQuote := false
|
||||
i := start
|
||||
for i < len(html) {
|
||||
switch {
|
||||
case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote:
|
||||
return i
|
||||
case html[i] == '\'':
|
||||
inSingleQuote = !inSingleQuote
|
||||
case html[i] == '"':
|
||||
inDoubleQuote = !inDoubleQuote
|
||||
case html[i] == '`':
|
||||
inGraveQuote = !inGraveQuote
|
||||
}
|
||||
i++
|
||||
}
|
||||
return start
|
||||
}
|
||||
|
||||
func findHTMLTagPos(tag []byte, tagname string) (bool, int) {
|
||||
i := 0
|
||||
if i < len(tag) && tag[0] != '<' {
|
||||
return false, -1
|
||||
}
|
||||
i++
|
||||
i = skipSpace(tag, i)
|
||||
|
||||
if i < len(tag) && tag[i] == '/' {
|
||||
i++
|
||||
}
|
||||
|
||||
i = skipSpace(tag, i)
|
||||
j := 0
|
||||
for ; i < len(tag); i, j = i+1, j+1 {
|
||||
if j >= len(tagname) {
|
||||
break
|
||||
}
|
||||
|
||||
if strings.ToLower(string(tag[i]))[0] != tagname[j] {
|
||||
return false, -1
|
||||
}
|
||||
}
|
||||
|
||||
if i == len(tag) {
|
||||
return false, -1
|
||||
}
|
||||
|
||||
rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
|
||||
if rightAngle >= i {
|
||||
return true, rightAngle
|
||||
}
|
||||
|
||||
return false, -1
|
||||
}
|
||||
|
||||
func skipSpace(tag []byte, i int) int {
|
||||
for i < len(tag) && isspace(tag[i]) {
|
||||
i++
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
func isRelativeLink(link []byte) (yes bool) {
|
||||
// a tag begin with '#'
|
||||
if link[0] == '#' {
|
||||
return true
|
||||
}
|
||||
|
||||
// link begin with '/' but not '//', the second maybe a protocol relative link
|
||||
if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
|
||||
return true
|
||||
}
|
||||
|
||||
// only the root '/'
|
||||
if len(link) == 1 && link[0] == '/' {
|
||||
return true
|
||||
}
|
||||
|
||||
// current directory : begin with "./"
|
||||
if bytes.HasPrefix(link, []byte("./")) {
|
||||
return true
|
||||
}
|
||||
|
||||
// parent directory : begin with "../"
|
||||
if bytes.HasPrefix(link, []byte("../")) {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string {
|
||||
for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
|
||||
tmp := fmt.Sprintf("%s-%d", id, count+1)
|
||||
|
||||
if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
|
||||
r.headingIDs[id] = count + 1
|
||||
id = tmp
|
||||
} else {
|
||||
id = id + "-1"
|
||||
}
|
||||
}
|
||||
|
||||
if _, found := r.headingIDs[id]; !found {
|
||||
r.headingIDs[id] = 0
|
||||
}
|
||||
|
||||
return id
|
||||
}
|
||||
|
||||
func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte {
|
||||
if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
|
||||
newDest := r.AbsolutePrefix
|
||||
if link[0] != '/' {
|
||||
newDest += "/"
|
||||
}
|
||||
newDest += string(link)
|
||||
return []byte(newDest)
|
||||
}
|
||||
return link
|
||||
}
|
||||
|
||||
func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string {
|
||||
if isRelativeLink(link) {
|
||||
return attrs
|
||||
}
|
||||
val := []string{}
|
||||
if flags&NofollowLinks != 0 {
|
||||
val = append(val, "nofollow")
|
||||
}
|
||||
if flags&NoreferrerLinks != 0 {
|
||||
val = append(val, "noreferrer")
|
||||
}
|
||||
if flags&NoopenerLinks != 0 {
|
||||
val = append(val, "noopener")
|
||||
}
|
||||
if flags&HrefTargetBlank != 0 {
|
||||
attrs = append(attrs, "target=\"_blank\"")
|
||||
}
|
||||
if len(val) == 0 {
|
||||
return attrs
|
||||
}
|
||||
attr := fmt.Sprintf("rel=%q", strings.Join(val, " "))
|
||||
return append(attrs, attr)
|
||||
}
|
||||
|
||||
func isMailto(link []byte) bool {
|
||||
return bytes.HasPrefix(link, []byte("mailto:"))
|
||||
}
|
||||
|
||||
func needSkipLink(flags HTMLFlags, dest []byte) bool {
|
||||
if flags&SkipLinks != 0 {
|
||||
return true
|
||||
}
|
||||
return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
|
||||
}
|
||||
|
||||
func isSmartypantable(node *Node) bool {
|
||||
pt := node.Parent.Type
|
||||
return pt != Link && pt != CodeBlock && pt != Code
|
||||
}
|
||||
|
||||
func appendLanguageAttr(attrs []string, info []byte) []string {
|
||||
if len(info) == 0 {
|
||||
return attrs
|
||||
}
|
||||
endOfLang := bytes.IndexAny(info, "\t ")
|
||||
if endOfLang < 0 {
|
||||
endOfLang = len(info)
|
||||
}
|
||||
return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang]))
|
||||
}
|
||||
|
||||
func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) {
|
||||
w.Write(name)
|
||||
if len(attrs) > 0 {
|
||||
w.Write(spaceBytes)
|
||||
w.Write([]byte(strings.Join(attrs, " ")))
|
||||
}
|
||||
w.Write(gtBytes)
|
||||
r.lastOutputLen = 1
|
||||
}
|
||||
|
||||
func footnoteRef(prefix string, node *Node) []byte {
|
||||
urlFrag := prefix + string(slugify(node.Destination))
|
||||
anchor := fmt.Sprintf(`<a href="#fn:%s">%d</a>`, urlFrag, node.NoteID)
|
||||
return []byte(fmt.Sprintf(`<sup class="footnote-ref" id="fnref:%s">%s</sup>`, urlFrag, anchor))
|
||||
}
|
||||
|
||||
func footnoteItem(prefix string, slug []byte) []byte {
|
||||
return []byte(fmt.Sprintf(`<li id="fn:%s%s">`, prefix, slug))
|
||||
}
|
||||
|
||||
func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte {
|
||||
const format = ` <a class="footnote-return" href="#fnref:%s%s">%s</a>`
|
||||
return []byte(fmt.Sprintf(format, prefix, slug, returnLink))
|
||||
}
|
||||
|
||||
func itemOpenCR(node *Node) bool {
|
||||
if node.Prev == nil {
|
||||
return false
|
||||
}
|
||||
ld := node.Parent.ListData
|
||||
return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0
|
||||
}
|
||||
|
||||
func skipParagraphTags(node *Node) bool {
|
||||
grandparent := node.Parent.Parent
|
||||
if grandparent == nil || grandparent.Type != List {
|
||||
return false
|
||||
}
|
||||
tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0
|
||||
return grandparent.Type == List && tightOrTerm
|
||||
}
|
||||
|
||||
func cellAlignment(align CellAlignFlags) string {
|
||||
switch align {
|
||||
case TableAlignmentLeft:
|
||||
return "left"
|
||||
case TableAlignmentRight:
|
||||
return "right"
|
||||
case TableAlignmentCenter:
|
||||
return "center"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
func (r *HTMLRenderer) out(w io.Writer, text []byte) {
|
||||
if r.disableTags > 0 {
|
||||
w.Write(htmlTagRe.ReplaceAll(text, []byte{}))
|
||||
} else {
|
||||
w.Write(text)
|
||||
}
|
||||
r.lastOutputLen = len(text)
|
||||
}
|
||||
|
||||
func (r *HTMLRenderer) cr(w io.Writer) {
|
||||
if r.lastOutputLen > 0 {
|
||||
r.out(w, nlBytes)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
nlBytes = []byte{'\n'}
|
||||
gtBytes = []byte{'>'}
|
||||
spaceBytes = []byte{' '}
|
||||
)
|
||||
|
||||
var (
|
||||
brTag = []byte("<br>")
|
||||
brXHTMLTag = []byte("<br />")
|
||||
emTag = []byte("<em>")
|
||||
emCloseTag = []byte("</em>")
|
||||
strongTag = []byte("<strong>")
|
||||
strongCloseTag = []byte("</strong>")
|
||||
delTag = []byte("<del>")
|
||||
delCloseTag = []byte("</del>")
|
||||
ttTag = []byte("<tt>")
|
||||
ttCloseTag = []byte("</tt>")
|
||||
aTag = []byte("<a")
|
||||
aCloseTag = []byte("</a>")
|
||||
preTag = []byte("<pre>")
|
||||
preCloseTag = []byte("</pre>")
|
||||
codeTag = []byte("<code>")
|
||||
codeCloseTag = []byte("</code>")
|
||||
pTag = []byte("<p>")
|
||||
pCloseTag = []byte("</p>")
|
||||
blockquoteTag = []byte("<blockquote>")
|
||||
blockquoteCloseTag = []byte("</blockquote>")
|
||||
hrTag = []byte("<hr>")
|
||||
hrXHTMLTag = []byte("<hr />")
|
||||
ulTag = []byte("<ul>")
|
||||
ulCloseTag = []byte("</ul>")
|
||||
olTag = []byte("<ol>")
|
||||
olCloseTag = []byte("</ol>")
|
||||
dlTag = []byte("<dl>")
|
||||
dlCloseTag = []byte("</dl>")
|
||||
liTag = []byte("<li>")
|
||||
liCloseTag = []byte("</li>")
|
||||
ddTag = []byte("<dd>")
|
||||
ddCloseTag = []byte("</dd>")
|
||||
dtTag = []byte("<dt>")
|
||||
dtCloseTag = []byte("</dt>")
|
||||
tableTag = []byte("<table>")
|
||||
tableCloseTag = []byte("</table>")
|
||||
tdTag = []byte("<td")
|
||||
tdCloseTag = []byte("</td>")
|
||||
thTag = []byte("<th")
|
||||
thCloseTag = []byte("</th>")
|
||||
theadTag = []byte("<thead>")
|
||||
theadCloseTag = []byte("</thead>")
|
||||
tbodyTag = []byte("<tbody>")
|
||||
tbodyCloseTag = []byte("</tbody>")
|
||||
trTag = []byte("<tr>")
|
||||
trCloseTag = []byte("</tr>")
|
||||
h1Tag = []byte("<h1")
|
||||
h1CloseTag = []byte("</h1>")
|
||||
h2Tag = []byte("<h2")
|
||||
h2CloseTag = []byte("</h2>")
|
||||
h3Tag = []byte("<h3")
|
||||
h3CloseTag = []byte("</h3>")
|
||||
h4Tag = []byte("<h4")
|
||||
h4CloseTag = []byte("</h4>")
|
||||
h5Tag = []byte("<h5")
|
||||
h5CloseTag = []byte("</h5>")
|
||||
h6Tag = []byte("<h6")
|
||||
h6CloseTag = []byte("</h6>")
|
||||
|
||||
footnotesDivBytes = []byte("\n<div class=\"footnotes\">\n\n")
|
||||
footnotesCloseDivBytes = []byte("\n</div>\n")
|
||||
)
|
||||
|
||||
func headingTagsFromLevel(level int) ([]byte, []byte) {
|
||||
if level <= 1 {
|
||||
return h1Tag, h1CloseTag
|
||||
}
|
||||
switch level {
|
||||
case 2:
|
||||
return h2Tag, h2CloseTag
|
||||
case 3:
|
||||
return h3Tag, h3CloseTag
|
||||
case 4:
|
||||
return h4Tag, h4CloseTag
|
||||
case 5:
|
||||
return h5Tag, h5CloseTag
|
||||
}
|
||||
return h6Tag, h6CloseTag
|
||||
}
|
||||
|
||||
func (r *HTMLRenderer) outHRTag(w io.Writer) {
|
||||
if r.Flags&UseXHTML == 0 {
|
||||
r.out(w, hrTag)
|
||||
} else {
|
||||
r.out(w, hrXHTMLTag)
|
||||
}
|
||||
}
|
||||
|
||||
// RenderNode is a default renderer of a single node of a syntax tree. For
|
||||
// block nodes it will be called twice: first time with entering=true, second
|
||||
// time with entering=false, so that it could know when it's working on an open
|
||||
// tag and when on close. It writes the result to w.
|
||||
//
|
||||
// The return value is a way to tell the calling walker to adjust its walk
|
||||
// pattern: e.g. it can terminate the traversal by returning Terminate. Or it
|
||||
// can ask the walker to skip a subtree of this node by returning SkipChildren.
|
||||
// The typical behavior is to return GoToNext, which asks for the usual
|
||||
// traversal to the next node.
|
||||
func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus {
|
||||
attrs := []string{}
|
||||
switch node.Type {
|
||||
case Text:
|
||||
if r.Flags&Smartypants != 0 {
|
||||
var tmp bytes.Buffer
|
||||
escapeHTML(&tmp, node.Literal)
|
||||
r.sr.Process(w, tmp.Bytes())
|
||||
} else {
|
||||
if node.Parent.Type == Link {
|
||||
escLink(w, node.Literal)
|
||||
} else {
|
||||
escapeHTML(w, node.Literal)
|
||||
}
|
||||
}
|
||||
case Softbreak:
|
||||
r.cr(w)
|
||||
// TODO: make it configurable via out(renderer.softbreak)
|
||||
case Hardbreak:
|
||||
if r.Flags&UseXHTML == 0 {
|
||||
r.out(w, brTag)
|
||||
} else {
|
||||
r.out(w, brXHTMLTag)
|
||||
}
|
||||
r.cr(w)
|
||||
case Emph:
|
||||
if entering {
|
||||
r.out(w, emTag)
|
||||
} else {
|
||||
r.out(w, emCloseTag)
|
||||
}
|
||||
case Strong:
|
||||
if entering {
|
||||
r.out(w, strongTag)
|
||||
} else {
|
||||
r.out(w, strongCloseTag)
|
||||
}
|
||||
case Del:
|
||||
if entering {
|
||||
r.out(w, delTag)
|
||||
} else {
|
||||
r.out(w, delCloseTag)
|
||||
}
|
||||
case HTMLSpan:
|
||||
if r.Flags&SkipHTML != 0 {
|
||||
break
|
||||
}
|
||||
r.out(w, node.Literal)
|
||||
case Link:
|
||||
// mark it but don't link it if it is not a safe link: no smartypants
|
||||
dest := node.LinkData.Destination
|
||||
if needSkipLink(r.Flags, dest) {
|
||||
if entering {
|
||||
r.out(w, ttTag)
|
||||
} else {
|
||||
r.out(w, ttCloseTag)
|
||||
}
|
||||
} else {
|
||||
if entering {
|
||||
dest = r.addAbsPrefix(dest)
|
||||
var hrefBuf bytes.Buffer
|
||||
hrefBuf.WriteString("href=\"")
|
||||
escLink(&hrefBuf, dest)
|
||||
hrefBuf.WriteByte('"')
|
||||
attrs = append(attrs, hrefBuf.String())
|
||||
if node.NoteID != 0 {
|
||||
r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node))
|
||||
break
|
||||
}
|
||||
attrs = appendLinkAttrs(attrs, r.Flags, dest)
|
||||
if len(node.LinkData.Title) > 0 {
|
||||
var titleBuff bytes.Buffer
|
||||
titleBuff.WriteString("title=\"")
|
||||
escapeHTML(&titleBuff, node.LinkData.Title)
|
||||
titleBuff.WriteByte('"')
|
||||
attrs = append(attrs, titleBuff.String())
|
||||
}
|
||||
r.tag(w, aTag, attrs)
|
||||
} else {
|
||||
if node.NoteID != 0 {
|
||||
break
|
||||
}
|
||||
r.out(w, aCloseTag)
|
||||
}
|
||||
}
|
||||
case Image:
|
||||
if r.Flags&SkipImages != 0 {
|
||||
return SkipChildren
|
||||
}
|
||||
if entering {
|
||||
dest := node.LinkData.Destination
|
||||
dest = r.addAbsPrefix(dest)
|
||||
if r.disableTags == 0 {
|
||||
//if options.safe && potentiallyUnsafe(dest) {
|
||||
//out(w, `<img src="" alt="`)
|
||||
//} else {
|
||||
r.out(w, []byte(`<img src="`))
|
||||
escLink(w, dest)
|
||||
r.out(w, []byte(`" alt="`))
|
||||
//}
|
||||
}
|
||||
r.disableTags++
|
||||
} else {
|
||||
r.disableTags--
|
||||
if r.disableTags == 0 {
|
||||
if node.LinkData.Title != nil {
|
||||
r.out(w, []byte(`" title="`))
|
||||
escapeHTML(w, node.LinkData.Title)
|
||||
}
|
||||
r.out(w, []byte(`" />`))
|
||||
}
|
||||
}
|
||||
case Code:
|
||||
r.out(w, codeTag)
|
||||
escapeAllHTML(w, node.Literal)
|
||||
r.out(w, codeCloseTag)
|
||||
case Document:
|
||||
break
|
||||
case Paragraph:
|
||||
if skipParagraphTags(node) {
|
||||
break
|
||||
}
|
||||
if entering {
|
||||
// TODO: untangle this clusterfuck about when the newlines need
|
||||
// to be added and when not.
|
||||
if node.Prev != nil {
|
||||
switch node.Prev.Type {
|
||||
case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule:
|
||||
r.cr(w)
|
||||
}
|
||||
}
|
||||
if node.Parent.Type == BlockQuote && node.Prev == nil {
|
||||
r.cr(w)
|
||||
}
|
||||
r.out(w, pTag)
|
||||
} else {
|
||||
r.out(w, pCloseTag)
|
||||
if !(node.Parent.Type == Item && node.Next == nil) {
|
||||
r.cr(w)
|
||||
}
|
||||
}
|
||||
case BlockQuote:
|
||||
if entering {
|
||||
r.cr(w)
|
||||
r.out(w, blockquoteTag)
|
||||
} else {
|
||||
r.out(w, blockquoteCloseTag)
|
||||
r.cr(w)
|
||||
}
|
||||
case HTMLBlock:
|
||||
if r.Flags&SkipHTML != 0 {
|
||||
break
|
||||
}
|
||||
r.cr(w)
|
||||
r.out(w, node.Literal)
|
||||
r.cr(w)
|
||||
case Heading:
|
||||
headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level
|
||||
openTag, closeTag := headingTagsFromLevel(headingLevel)
|
||||
if entering {
|
||||
if node.IsTitleblock {
|
||||
attrs = append(attrs, `class="title"`)
|
||||
}
|
||||
if node.HeadingID != "" {
|
||||
id := r.ensureUniqueHeadingID(node.HeadingID)
|
||||
if r.HeadingIDPrefix != "" {
|
||||
id = r.HeadingIDPrefix + id
|
||||
}
|
||||
if r.HeadingIDSuffix != "" {
|
||||
id = id + r.HeadingIDSuffix
|
||||
}
|
||||
attrs = append(attrs, fmt.Sprintf(`id="%s"`, id))
|
||||
}
|
||||
r.cr(w)
|
||||
r.tag(w, openTag, attrs)
|
||||
} else {
|
||||
r.out(w, closeTag)
|
||||
if !(node.Parent.Type == Item && node.Next == nil) {
|
||||
r.cr(w)
|
||||
}
|
||||
}
|
||||
case HorizontalRule:
|
||||
r.cr(w)
|
||||
r.outHRTag(w)
|
||||
r.cr(w)
|
||||
case List:
|
||||
openTag := ulTag
|
||||
closeTag := ulCloseTag
|
||||
if node.ListFlags&ListTypeOrdered != 0 {
|
||||
openTag = olTag
|
||||
closeTag = olCloseTag
|
||||
}
|
||||
if node.ListFlags&ListTypeDefinition != 0 {
|
||||
openTag = dlTag
|
||||
closeTag = dlCloseTag
|
||||
}
|
||||
if entering {
|
||||
if node.IsFootnotesList {
|
||||
r.out(w, footnotesDivBytes)
|
||||
r.outHRTag(w)
|
||||
r.cr(w)
|
||||
}
|
||||
r.cr(w)
|
||||
if node.Parent.Type == Item && node.Parent.Parent.Tight {
|
||||
r.cr(w)
|
||||
}
|
||||
r.tag(w, openTag[:len(openTag)-1], attrs)
|
||||
r.cr(w)
|
||||
} else {
|
||||
r.out(w, closeTag)
|
||||
//cr(w)
|
||||
//if node.parent.Type != Item {
|
||||
// cr(w)
|
||||
//}
|
||||
if node.Parent.Type == Item && node.Next != nil {
|
||||
r.cr(w)
|
||||
}
|
||||
if node.Parent.Type == Document || node.Parent.Type == BlockQuote {
|
||||
r.cr(w)
|
||||
}
|
||||
if node.IsFootnotesList {
|
||||
r.out(w, footnotesCloseDivBytes)
|
||||
}
|
||||
}
|
||||
case Item:
|
||||
openTag := liTag
|
||||
closeTag := liCloseTag
|
||||
if node.ListFlags&ListTypeDefinition != 0 {
|
||||
openTag = ddTag
|
||||
closeTag = ddCloseTag
|
||||
}
|
||||
if node.ListFlags&ListTypeTerm != 0 {
|
||||
openTag = dtTag
|
||||
closeTag = dtCloseTag
|
||||
}
|
||||
if entering {
|
||||
if itemOpenCR(node) {
|
||||
r.cr(w)
|
||||
}
|
||||
if node.ListData.RefLink != nil {
|
||||
slug := slugify(node.ListData.RefLink)
|
||||
r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug))
|
||||
break
|
||||
}
|
||||
r.out(w, openTag)
|
||||
} else {
|
||||
if node.ListData.RefLink != nil {
|
||||
slug := slugify(node.ListData.RefLink)
|
||||
if r.Flags&FootnoteReturnLinks != 0 {
|
||||
r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug))
|
||||
}
|
||||
}
|
||||
r.out(w, closeTag)
|
||||
r.cr(w)
|
||||
}
|
||||
case CodeBlock:
|
||||
attrs = appendLanguageAttr(attrs, node.Info)
|
||||
r.cr(w)
|
||||
r.out(w, preTag)
|
||||
r.tag(w, codeTag[:len(codeTag)-1], attrs)
|
||||
escapeAllHTML(w, node.Literal)
|
||||
r.out(w, codeCloseTag)
|
||||
r.out(w, preCloseTag)
|
||||
if node.Parent.Type != Item {
|
||||
r.cr(w)
|
||||
}
|
||||
case Table:
|
||||
if entering {
|
||||
r.cr(w)
|
||||
r.out(w, tableTag)
|
||||
} else {
|
||||
r.out(w, tableCloseTag)
|
||||
r.cr(w)
|
||||
}
|
||||
case TableCell:
|
||||
openTag := tdTag
|
||||
closeTag := tdCloseTag
|
||||
if node.IsHeader {
|
||||
openTag = thTag
|
||||
closeTag = thCloseTag
|
||||
}
|
||||
if entering {
|
||||
align := cellAlignment(node.Align)
|
||||
if align != "" {
|
||||
attrs = append(attrs, fmt.Sprintf(`align="%s"`, align))
|
||||
}
|
||||
if node.Prev == nil {
|
||||
r.cr(w)
|
||||
}
|
||||
r.tag(w, openTag, attrs)
|
||||
} else {
|
||||
r.out(w, closeTag)
|
||||
r.cr(w)
|
||||
}
|
||||
case TableHead:
|
||||
if entering {
|
||||
r.cr(w)
|
||||
r.out(w, theadTag)
|
||||
} else {
|
||||
r.out(w, theadCloseTag)
|
||||
r.cr(w)
|
||||
}
|
||||
case TableBody:
|
||||
if entering {
|
||||
r.cr(w)
|
||||
r.out(w, tbodyTag)
|
||||
// XXX: this is to adhere to a rather silly test. Should fix test.
|
||||
if node.FirstChild == nil {
|
||||
r.cr(w)
|
||||
}
|
||||
} else {
|
||||
r.out(w, tbodyCloseTag)
|
||||
r.cr(w)
|
||||
}
|
||||
case TableRow:
|
||||
if entering {
|
||||
r.cr(w)
|
||||
r.out(w, trTag)
|
||||
} else {
|
||||
r.out(w, trCloseTag)
|
||||
r.cr(w)
|
||||
}
|
||||
default:
|
||||
panic("Unknown node type " + node.Type.String())
|
||||
}
|
||||
return GoToNext
|
||||
}
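Because RenderNode is the single dispatch point, a custom renderer can embed the HTML renderer and intercept only the node types it cares about, delegating everything else. A sketch of that pattern; the noImages type and the import path are illustrative assumptions, not part of this package:

```go
package main

import (
	"io"
	"os"

	"github.com/russross/blackfriday/v2"
)

// noImages delegates to the embedded HTMLRenderer but drops Image nodes,
// mirroring what the SkipImages flag does internally.
type noImages struct {
	*blackfriday.HTMLRenderer
}

func (r *noImages) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
	if node.Type == blackfriday.Image {
		return blackfriday.SkipChildren
	}
	return r.HTMLRenderer.RenderNode(w, node, entering)
}

func main() {
	r := &noImages{blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{})}
	out := blackfriday.Run([]byte("text ![alt](pic.png)\n"), blackfriday.WithRenderer(r))
	os.Stdout.Write(out)
}
```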
|
||||
|
||||
// RenderHeader writes HTML document preamble and TOC if requested.
|
||||
func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) {
|
||||
r.writeDocumentHeader(w)
|
||||
if r.Flags&TOC != 0 {
|
||||
r.writeTOC(w, ast)
|
||||
}
|
||||
}
|
||||
|
||||
// RenderFooter writes HTML document footer.
|
||||
func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) {
|
||||
if r.Flags&CompletePage == 0 {
|
||||
return
|
||||
}
|
||||
io.WriteString(w, "\n</body>\n</html>\n")
|
||||
}
|
||||
|
||||
func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) {
|
||||
if r.Flags&CompletePage == 0 {
|
||||
return
|
||||
}
|
||||
ending := ""
|
||||
if r.Flags&UseXHTML != 0 {
|
||||
io.WriteString(w, "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" ")
|
||||
io.WriteString(w, "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n")
|
||||
io.WriteString(w, "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n")
|
||||
ending = " /"
|
||||
} else {
|
||||
io.WriteString(w, "<!DOCTYPE html>\n")
|
||||
io.WriteString(w, "<html>\n")
|
||||
}
|
||||
io.WriteString(w, "<head>\n")
|
||||
io.WriteString(w, " <title>")
|
||||
if r.Flags&Smartypants != 0 {
|
||||
r.sr.Process(w, []byte(r.Title))
|
||||
} else {
|
||||
escapeHTML(w, []byte(r.Title))
|
||||
}
|
||||
io.WriteString(w, "</title>\n")
|
||||
io.WriteString(w, " <meta name=\"GENERATOR\" content=\"Blackfriday Markdown Processor v")
|
||||
io.WriteString(w, Version)
|
||||
io.WriteString(w, "\"")
|
||||
io.WriteString(w, ending)
|
||||
io.WriteString(w, ">\n")
|
||||
io.WriteString(w, " <meta charset=\"utf-8\"")
|
||||
io.WriteString(w, ending)
|
||||
io.WriteString(w, ">\n")
|
||||
if r.CSS != "" {
|
||||
io.WriteString(w, " <link rel=\"stylesheet\" type=\"text/css\" href=\"")
|
||||
escapeHTML(w, []byte(r.CSS))
|
||||
io.WriteString(w, "\"")
|
||||
io.WriteString(w, ending)
|
||||
io.WriteString(w, ">\n")
|
||||
}
|
||||
if r.Icon != "" {
|
||||
io.WriteString(w, " <link rel=\"icon\" type=\"image/x-icon\" href=\"")
|
||||
escapeHTML(w, []byte(r.Icon))
|
||||
io.WriteString(w, "\"")
|
||||
io.WriteString(w, ending)
|
||||
io.WriteString(w, ">\n")
|
||||
}
|
||||
io.WriteString(w, "</head>\n")
|
||||
io.WriteString(w, "<body>\n\n")
|
||||
}
|
||||
|
||||
func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) {
|
||||
buf := bytes.Buffer{}
|
||||
|
||||
inHeading := false
|
||||
tocLevel := 0
|
||||
headingCount := 0
|
||||
|
||||
ast.Walk(func(node *Node, entering bool) WalkStatus {
|
||||
if node.Type == Heading && !node.HeadingData.IsTitleblock {
|
||||
inHeading = entering
|
||||
if entering {
|
||||
node.HeadingID = fmt.Sprintf("toc_%d", headingCount)
|
||||
if node.Level == tocLevel {
|
||||
buf.WriteString("</li>\n\n<li>")
|
||||
} else if node.Level < tocLevel {
|
||||
for node.Level < tocLevel {
|
||||
tocLevel--
|
||||
buf.WriteString("</li>\n</ul>")
|
||||
}
|
||||
buf.WriteString("</li>\n\n<li>")
|
||||
} else {
|
||||
for node.Level > tocLevel {
|
||||
tocLevel++
|
||||
buf.WriteString("\n<ul>\n<li>")
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintf(&buf, `<a href="#toc_%d">`, headingCount)
|
||||
headingCount++
|
||||
} else {
|
||||
buf.WriteString("</a>")
|
||||
}
|
||||
return GoToNext
|
||||
}
|
||||
|
||||
if inHeading {
|
||||
return r.RenderNode(&buf, node, entering)
|
||||
}
|
||||
|
||||
return GoToNext
|
||||
})
|
||||
|
||||
for ; tocLevel > 0; tocLevel-- {
|
||||
buf.WriteString("</li>\n</ul>")
|
||||
}
|
||||
|
||||
if buf.Len() > 0 {
|
||||
io.WriteString(w, "<nav>\n")
|
||||
w.Write(buf.Bytes())
|
||||
io.WriteString(w, "\n\n</nav>\n")
|
||||
}
|
||||
r.lastOutputLen = buf.Len()
|
||||
}
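writeDocumentHeader and writeTOC only run when the renderer was created with the corresponding flags. A hedged sketch of producing a standalone page with a table of contents; the title, CSS path and input are placeholders:

```go
package main

import (
	"os"

	"github.com/russross/blackfriday/v2"
)

func main() {
	input := []byte("# Intro\n\nSome text.\n\n## Details\n\nMore text.\n")
	r := blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
		// CompletePage triggers the document preamble and footer,
		// TOC additionally makes RenderHeader call writeTOC.
		Flags: blackfriday.CommonHTMLFlags | blackfriday.CompletePage | blackfriday.TOC,
		Title: "Example page",
		CSS:   "style.css",
	})
	os.Stdout.Write(blackfriday.Run(input, blackfriday.WithRenderer(r)))
}
```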
|
||||
1228  man/vendor/github.com/russross/blackfriday/v2/inline.go  (generated, vendored, new file; diff suppressed because it is too large)
950  man/vendor/github.com/russross/blackfriday/v2/markdown.go  (generated, vendored, new file)
@@ -0,0 +1,950 @@
|
||||
// Blackfriday Markdown Processor
|
||||
// Available at http://github.com/russross/blackfriday
|
||||
//
|
||||
// Copyright © 2011 Russ Ross <russ@russross.com>.
|
||||
// Distributed under the Simplified BSD License.
|
||||
// See README.md for details.
|
||||
|
||||
package blackfriday
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
//
|
||||
// Markdown parsing and processing
|
||||
//
|
||||
|
||||
// Version string of the package. Appears in the rendered document when
|
||||
// CompletePage flag is on.
|
||||
const Version = "2.0"
|
||||
|
||||
// Extensions is a bitwise or'ed collection of enabled Blackfriday's
|
||||
// extensions.
|
||||
type Extensions int
|
||||
|
||||
// These are the supported markdown parsing extensions.
|
||||
// OR these values together to select multiple extensions.
|
||||
const (
|
||||
NoExtensions Extensions = 0
|
||||
NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words
|
||||
Tables // Render tables
|
||||
FencedCode // Render fenced code blocks
|
||||
Autolink // Detect embedded URLs that are not explicitly marked
|
||||
Strikethrough // Strikethrough text using ~~test~~
|
||||
LaxHTMLBlocks // Loosen up HTML block parsing rules
|
||||
SpaceHeadings // Be strict about prefix heading rules
|
||||
HardLineBreak // Translate newlines into line breaks
|
||||
TabSizeEight // Expand tabs to eight spaces instead of four
|
||||
Footnotes // Pandoc-style footnotes
|
||||
NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block
|
||||
HeadingIDs // specify heading IDs with {#id}
|
||||
Titleblock // Titleblock ala pandoc
|
||||
AutoHeadingIDs // Create the heading ID from the text
|
||||
BackslashLineBreak // Translate trailing backslashes into line breaks
|
||||
DefinitionLists // Render definition lists
|
||||
|
||||
CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants |
|
||||
SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes
|
||||
|
||||
CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode |
|
||||
Autolink | Strikethrough | SpaceHeadings | HeadingIDs |
|
||||
BackslashLineBreak | DefinitionLists
|
||||
)
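Since Extensions is a bit set, individual features are combined with bitwise OR and handed to the parser through WithExtensions. A short sketch (the chosen extensions and input are arbitrary), assuming the upstream import path github.com/russross/blackfriday/v2:

```go
package main

import (
	"os"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Bitwise OR selects several extensions at once.
	exts := blackfriday.CommonExtensions | blackfriday.HardLineBreak | blackfriday.AutoHeadingIDs
	md := []byte("# Title\nFirst line\nsecond line\n")
	os.Stdout.Write(blackfriday.Run(md, blackfriday.WithExtensions(exts)))
}
```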
|
||||
|
||||
// ListType contains bitwise or'ed flags for list and list item objects.
|
||||
type ListType int
|
||||
|
||||
// These are the possible flag values for the ListItem renderer.
|
||||
// Multiple flag values may be ORed together.
|
||||
// These are mostly of interest if you are writing a new output format.
|
||||
const (
|
||||
ListTypeOrdered ListType = 1 << iota
|
||||
ListTypeDefinition
|
||||
ListTypeTerm
|
||||
|
||||
ListItemContainsBlock
|
||||
ListItemBeginningOfList // TODO: figure out if this is of any use now
|
||||
ListItemEndOfList
|
||||
)
|
||||
|
||||
// CellAlignFlags holds a type of alignment in a table cell.
|
||||
type CellAlignFlags int
|
||||
|
||||
// These are the possible flag values for the table cell renderer.
|
||||
// Only a single one of these values will be used; they are not ORed together.
|
||||
// These are mostly of interest if you are writing a new output format.
|
||||
const (
|
||||
TableAlignmentLeft CellAlignFlags = 1 << iota
|
||||
TableAlignmentRight
|
||||
TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight)
|
||||
)
|
||||
|
||||
// The size of a tab stop.
|
||||
const (
|
||||
TabSizeDefault = 4
|
||||
TabSizeDouble = 8
|
||||
)
|
||||
|
||||
// blockTags is a set of tags that are recognized as HTML block tags.
|
||||
// Any of these can be included in markdown text without special escaping.
|
||||
var blockTags = map[string]struct{}{
|
||||
"blockquote": {},
|
||||
"del": {},
|
||||
"div": {},
|
||||
"dl": {},
|
||||
"fieldset": {},
|
||||
"form": {},
|
||||
"h1": {},
|
||||
"h2": {},
|
||||
"h3": {},
|
||||
"h4": {},
|
||||
"h5": {},
|
||||
"h6": {},
|
||||
"iframe": {},
|
||||
"ins": {},
|
||||
"math": {},
|
||||
"noscript": {},
|
||||
"ol": {},
|
||||
"pre": {},
|
||||
"p": {},
|
||||
"script": {},
|
||||
"style": {},
|
||||
"table": {},
|
||||
"ul": {},
|
||||
|
||||
// HTML5
|
||||
"address": {},
|
||||
"article": {},
|
||||
"aside": {},
|
||||
"canvas": {},
|
||||
"figcaption": {},
|
||||
"figure": {},
|
||||
"footer": {},
|
||||
"header": {},
|
||||
"hgroup": {},
|
||||
"main": {},
|
||||
"nav": {},
|
||||
"output": {},
|
||||
"progress": {},
|
||||
"section": {},
|
||||
"video": {},
|
||||
}
|
||||
|
||||
// Renderer is the rendering interface. This is mostly of interest if you are
|
||||
// implementing a new rendering format.
|
||||
//
|
||||
// Only an HTML implementation is provided in this repository, see the README
|
||||
// for external implementations.
|
||||
type Renderer interface {
|
||||
// RenderNode is the main rendering method. It will be called once for
|
||||
// every leaf node and twice for every non-leaf node (first with
|
||||
// entering=true, then with entering=false). The method should write its
|
||||
// rendition of the node to the supplied writer w.
|
||||
RenderNode(w io.Writer, node *Node, entering bool) WalkStatus
|
||||
|
||||
// RenderHeader is a method that allows the renderer to produce some
|
||||
// content preceding the main body of the output document. The header is
|
||||
// understood in the broad sense here. For example, the default HTML
|
||||
// renderer will write not only the HTML document preamble, but also the
|
||||
// table of contents if it was requested.
|
||||
//
|
||||
// The method will be passed an entire document tree, in case a particular
|
||||
// implementation needs to inspect it to produce output.
|
||||
//
|
||||
// The output should be written to the supplied writer w. If your
|
||||
// implementation has no header to write, supply an empty implementation.
|
||||
RenderHeader(w io.Writer, ast *Node)
|
||||
|
||||
// RenderFooter is a symmetric counterpart of RenderHeader.
|
||||
RenderFooter(w io.Writer, ast *Node)
|
||||
}
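Any type with these three methods can be plugged in through WithRenderer. A minimal sketch of a non-HTML renderer that extracts only the plain text of a document; the textOnly type, the PlainText helper and the package name are illustrative assumptions:

```go
package mdexample

import (
	"io"

	"github.com/russross/blackfriday/v2"
)

// textOnly implements blackfriday.Renderer and writes just the literal text
// of leaf nodes, ignoring all markup, headers and footers.
type textOnly struct{}

func (textOnly) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
	if entering && (node.Type == blackfriday.Text || node.Type == blackfriday.Code) {
		w.Write(node.Literal)
	}
	return blackfriday.GoToNext
}

func (textOnly) RenderHeader(w io.Writer, ast *blackfriday.Node) {}
func (textOnly) RenderFooter(w io.Writer, ast *blackfriday.Node) {}

// PlainText renders markdown to a rough plain-text approximation.
func PlainText(md []byte) []byte {
	return blackfriday.Run(md, blackfriday.WithRenderer(textOnly{}))
}
```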
|
||||
|
||||
// Callback functions for inline parsing. One such function is defined
|
||||
// for each character that triggers a response when parsing inline data.
|
||||
type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node)
|
||||
|
||||
// Markdown is a type that holds extensions and the runtime state used by
|
||||
// Parse, and the renderer. You can not use it directly, construct it with New.
|
||||
type Markdown struct {
|
||||
renderer Renderer
|
||||
referenceOverride ReferenceOverrideFunc
|
||||
refs map[string]*reference
|
||||
inlineCallback [256]inlineParser
|
||||
extensions Extensions
|
||||
nesting int
|
||||
maxNesting int
|
||||
insideLink bool
|
||||
|
||||
// Footnotes need to be ordered as well as available to quickly check for
|
||||
// presence. If a ref is also a footnote, it's stored both in refs and here
|
||||
// in notes. Slice is nil if footnotes not enabled.
|
||||
notes []*reference
|
||||
|
||||
doc *Node
|
||||
tip *Node // = doc
|
||||
oldTip *Node
|
||||
lastMatchedContainer *Node // = doc
|
||||
allClosed bool
|
||||
}
|
||||
|
||||
func (p *Markdown) getRef(refid string) (ref *reference, found bool) {
|
||||
if p.referenceOverride != nil {
|
||||
r, overridden := p.referenceOverride(refid)
|
||||
if overridden {
|
||||
if r == nil {
|
||||
return nil, false
|
||||
}
|
||||
return &reference{
|
||||
link: []byte(r.Link),
|
||||
title: []byte(r.Title),
|
||||
noteID: 0,
|
||||
hasBlock: false,
|
||||
text: []byte(r.Text)}, true
|
||||
}
|
||||
}
|
||||
// refs are case insensitive
|
||||
ref, found = p.refs[strings.ToLower(refid)]
|
||||
return ref, found
|
||||
}
|
||||
|
||||
func (p *Markdown) finalize(block *Node) {
|
||||
above := block.Parent
|
||||
block.open = false
|
||||
p.tip = above
|
||||
}
|
||||
|
||||
func (p *Markdown) addChild(node NodeType, offset uint32) *Node {
|
||||
return p.addExistingChild(NewNode(node), offset)
|
||||
}
|
||||
|
||||
func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node {
|
||||
for !p.tip.canContain(node.Type) {
|
||||
p.finalize(p.tip)
|
||||
}
|
||||
p.tip.AppendChild(node)
|
||||
p.tip = node
|
||||
return node
|
||||
}
|
||||
|
||||
func (p *Markdown) closeUnmatchedBlocks() {
|
||||
if !p.allClosed {
|
||||
for p.oldTip != p.lastMatchedContainer {
|
||||
parent := p.oldTip.Parent
|
||||
p.finalize(p.oldTip)
|
||||
p.oldTip = parent
|
||||
}
|
||||
p.allClosed = true
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
//
|
||||
// Public interface
|
||||
//
|
||||
//
|
||||
|
||||
// Reference represents the details of a link.
|
||||
// See the documentation in Options for more details on use-case.
|
||||
type Reference struct {
|
||||
// Link is usually the URL the reference points to.
|
||||
Link string
|
||||
// Title is the alternate text describing the link in more detail.
|
||||
Title string
|
||||
// Text is the optional text to override the ref with if the syntax used was
|
||||
// [refid][]
|
||||
Text string
|
||||
}
|
||||
|
||||
// ReferenceOverrideFunc is expected to be called with a reference string and
|
||||
// return either a valid Reference type that the reference string maps to or
|
||||
// nil. If overridden is false, the default reference logic will be executed.
|
||||
// See the documentation in Options for more details on use-case.
|
||||
type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool)
|
||||
|
||||
// New constructs a Markdown processor. You can use the same With* functions as
|
||||
// for Run() to customize parser's behavior and the renderer.
|
||||
func New(opts ...Option) *Markdown {
|
||||
var p Markdown
|
||||
for _, opt := range opts {
|
||||
opt(&p)
|
||||
}
|
||||
p.refs = make(map[string]*reference)
|
||||
p.maxNesting = 16
|
||||
p.insideLink = false
|
||||
docNode := NewNode(Document)
|
||||
p.doc = docNode
|
||||
p.tip = docNode
|
||||
p.oldTip = docNode
|
||||
p.lastMatchedContainer = docNode
|
||||
p.allClosed = true
|
||||
// register inline parsers
|
||||
p.inlineCallback[' '] = maybeLineBreak
|
||||
p.inlineCallback['*'] = emphasis
|
||||
p.inlineCallback['_'] = emphasis
|
||||
if p.extensions&Strikethrough != 0 {
|
||||
p.inlineCallback['~'] = emphasis
|
||||
}
|
||||
p.inlineCallback['`'] = codeSpan
|
||||
p.inlineCallback['\n'] = lineBreak
|
||||
p.inlineCallback['['] = link
|
||||
p.inlineCallback['<'] = leftAngle
|
||||
p.inlineCallback['\\'] = escape
|
||||
p.inlineCallback['&'] = entity
|
||||
p.inlineCallback['!'] = maybeImage
|
||||
p.inlineCallback['^'] = maybeInlineFootnote
|
||||
if p.extensions&Autolink != 0 {
|
||||
p.inlineCallback['h'] = maybeAutoLink
|
||||
p.inlineCallback['m'] = maybeAutoLink
|
||||
p.inlineCallback['f'] = maybeAutoLink
|
||||
p.inlineCallback['H'] = maybeAutoLink
|
||||
p.inlineCallback['M'] = maybeAutoLink
|
||||
p.inlineCallback['F'] = maybeAutoLink
|
||||
}
|
||||
if p.extensions&Footnotes != 0 {
|
||||
p.notes = make([]*reference, 0)
|
||||
}
|
||||
return &p
|
||||
}
|
||||
|
||||
// Option customizes the Markdown processor's default behavior.
|
||||
type Option func(*Markdown)
|
||||
|
||||
// WithRenderer allows you to override the default renderer.
|
||||
func WithRenderer(r Renderer) Option {
|
||||
return func(p *Markdown) {
|
||||
p.renderer = r
|
||||
}
|
||||
}
|
||||
|
||||
// WithExtensions allows you to pick some of the many extensions provided by
|
||||
// Blackfriday. You can bitwise OR them.
|
||||
func WithExtensions(e Extensions) Option {
|
||||
return func(p *Markdown) {
|
||||
p.extensions = e
|
||||
}
|
||||
}
|
||||
|
||||
// WithNoExtensions turns off all extensions and custom behavior.
|
||||
func WithNoExtensions() Option {
|
||||
return func(p *Markdown) {
|
||||
p.extensions = NoExtensions
|
||||
p.renderer = NewHTMLRenderer(HTMLRendererParameters{
|
||||
Flags: HTMLFlagsNone,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// WithRefOverride sets an optional function callback that is called every
|
||||
// time a reference is resolved.
|
||||
//
|
||||
// In Markdown, the link reference syntax can be made to resolve a link to
|
||||
// a reference instead of an inline URL, in one of the following ways:
|
||||
//
|
||||
// * [link text][refid]
|
||||
// * [refid][]
|
||||
//
|
||||
// Usually, the refid is defined at the bottom of the Markdown document. If
|
||||
// this override function is provided, the refid is passed to the override
|
||||
// function first, before consulting the defined refids at the bottom. If
|
||||
// the override function indicates an override did not occur, the refids at
|
||||
// the bottom will be used to fill in the link details.
|
||||
func WithRefOverride(o ReferenceOverrideFunc) Option {
|
||||
return func(p *Markdown) {
|
||||
p.referenceOverride = o
|
||||
}
|
||||
}
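A sketch of such an override: it resolves the hypothetical refid "docs" in code and lets every other reference fall back to the definitions in the document itself (names and URL are placeholders):

```go
package mdexample

import "github.com/russross/blackfriday/v2"

// RenderWithOverride resolves the refid "docs" itself; anything else is left
// to the [refid]: definitions found in the input.
func RenderWithOverride(input []byte) []byte {
	return blackfriday.Run(input, blackfriday.WithRefOverride(
		func(ref string) (*blackfriday.Reference, bool) {
			if ref == "docs" {
				return &blackfriday.Reference{
					Link:  "https://example.com/docs",
					Title: "Project documentation",
				}, true
			}
			return nil, false // not overridden: use the refids defined in the text
		}))
}
```

With this in place, `[see the docs][docs]` links to the injected URL even if the document never defines `[docs]:` at the bottom.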
|
||||
|
||||
// Run is the main entry point to Blackfriday. It parses and renders a
|
||||
// block of markdown-encoded text.
|
||||
//
|
||||
// The simplest invocation of Run takes one argument, input:
|
||||
// output := Run(input)
|
||||
// This will parse the input with CommonExtensions enabled and render it with
|
||||
// the default HTMLRenderer (with CommonHTMLFlags).
|
||||
//
|
||||
// Variadic arguments opts can customize the default behavior. Since Markdown
|
||||
// type does not contain exported fields, you can not use it directly. Instead,
|
||||
// use the With* functions. For example, this will call the most basic
|
||||
// functionality, with no extensions:
|
||||
// output := Run(input, WithNoExtensions())
|
||||
//
|
||||
// You can use any number of With* arguments, even contradicting ones. They
|
||||
// will be applied in order of appearance and the latter will override the
|
||||
// former:
|
||||
// output := Run(input, WithNoExtensions(), WithExtensions(exts),
|
||||
// WithRenderer(yourRenderer))
|
||||
func Run(input []byte, opts ...Option) []byte {
|
||||
r := NewHTMLRenderer(HTMLRendererParameters{
|
||||
Flags: CommonHTMLFlags,
|
||||
})
|
||||
optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)}
|
||||
optList = append(optList, opts...)
|
||||
parser := New(optList...)
|
||||
ast := parser.Parse(input)
|
||||
var buf bytes.Buffer
|
||||
parser.renderer.RenderHeader(&buf, ast)
|
||||
ast.Walk(func(node *Node, entering bool) WalkStatus {
|
||||
return parser.renderer.RenderNode(&buf, node, entering)
|
||||
})
|
||||
parser.renderer.RenderFooter(&buf, ast)
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// Parse is an entry point to the parsing part of Blackfriday. It takes an
|
||||
// input markdown document and produces a syntax tree for its contents. This
|
||||
// tree can then be rendered with a default or custom renderer, or
|
||||
// analyzed/transformed by the caller to whatever non-standard needs they have.
|
||||
// The return value is the root node of the syntax tree.
|
||||
func (p *Markdown) Parse(input []byte) *Node {
|
||||
p.block(input)
|
||||
// Walk the tree and finish up some of unfinished blocks
|
||||
for p.tip != nil {
|
||||
p.finalize(p.tip)
|
||||
}
|
||||
// Walk the tree again and process inline markdown in each block
|
||||
p.doc.Walk(func(node *Node, entering bool) WalkStatus {
|
||||
if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell {
|
||||
p.inline(node, node.content)
|
||||
node.content = nil
|
||||
}
|
||||
return GoToNext
|
||||
})
|
||||
p.parseRefsToAST()
|
||||
return p.doc
|
||||
}
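When only analysis is needed, Parse can be used without rendering at all. A sketch that collects every link destination from the syntax tree (function and package names are illustrative):

```go
package mdexample

import "github.com/russross/blackfriday/v2"

// LinkDestinations parses md and returns the destination of every Link node.
func LinkDestinations(md []byte) []string {
	p := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
	ast := p.Parse(md)

	var links []string
	ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		if entering && node.Type == blackfriday.Link {
			links = append(links, string(node.LinkData.Destination))
		}
		return blackfriday.GoToNext
	})
	return links
}
```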
|
||||
|
||||
func (p *Markdown) parseRefsToAST() {
|
||||
if p.extensions&Footnotes == 0 || len(p.notes) == 0 {
|
||||
return
|
||||
}
|
||||
p.tip = p.doc
|
||||
block := p.addBlock(List, nil)
|
||||
block.IsFootnotesList = true
|
||||
block.ListFlags = ListTypeOrdered
|
||||
flags := ListItemBeginningOfList
|
||||
// Note: this loop is intentionally explicit, not range-form. This is
|
||||
// because the body of the loop will append nested footnotes to p.notes and
|
||||
// we need to process those late additions. Range form would only walk over
|
||||
// the fixed initial set.
|
||||
for i := 0; i < len(p.notes); i++ {
|
||||
ref := p.notes[i]
|
||||
p.addExistingChild(ref.footnote, 0)
|
||||
block := ref.footnote
|
||||
block.ListFlags = flags | ListTypeOrdered
|
||||
block.RefLink = ref.link
|
||||
if ref.hasBlock {
|
||||
flags |= ListItemContainsBlock
|
||||
p.block(ref.title)
|
||||
} else {
|
||||
p.inline(block, ref.title)
|
||||
}
|
||||
flags &^= ListItemBeginningOfList | ListItemContainsBlock
|
||||
}
|
||||
above := block.Parent
|
||||
finalizeList(block)
|
||||
p.tip = above
|
||||
block.Walk(func(node *Node, entering bool) WalkStatus {
|
||||
if node.Type == Paragraph || node.Type == Heading {
|
||||
p.inline(node, node.content)
|
||||
node.content = nil
|
||||
}
|
||||
return GoToNext
|
||||
})
|
||||
}
|
||||
|
||||
//
|
||||
// Link references
|
||||
//
|
||||
// This section implements support for references that (usually) appear
|
||||
// as footnotes in a document, and can be referenced anywhere in the document.
|
||||
// The basic format is:
|
||||
//
|
||||
// [1]: http://www.google.com/ "Google"
|
||||
// [2]: http://www.github.com/ "Github"
|
||||
//
|
||||
// Anywhere in the document, the reference can be linked by referring to its
|
||||
// label, i.e., 1 and 2 in this example, as in:
|
||||
//
|
||||
// This library is hosted on [Github][2], a git hosting site.
|
||||
//
|
||||
// Actual footnotes as specified in Pandoc and supported by some other Markdown
|
||||
// libraries such as php-markdown are also taken care of. They look like this:
|
||||
//
|
||||
// This sentence needs a bit of further explanation.[^note]
|
||||
//
|
||||
// [^note]: This is the explanation.
|
||||
//
|
||||
// Footnotes should be placed at the end of the document in an ordered list.
|
||||
// Finally, there are inline footnotes such as:
|
||||
//
|
||||
// Inline footnotes^[Also supported.] provide a quick inline explanation,
|
||||
// but are rendered at the bottom of the document.
|
||||
//
|
||||
|
||||
// reference holds all information necessary for reference-style links or
|
||||
// footnotes.
|
||||
//
|
||||
// Consider this markdown with reference-style links:
|
||||
//
|
||||
// [link][ref]
|
||||
//
|
||||
// [ref]: /url/ "tooltip title"
|
||||
//
|
||||
// It will be ultimately converted to this HTML:
|
||||
//
|
||||
// <p><a href=\"/url/\" title=\"title\">link</a></p>
|
||||
//
|
||||
// And a reference structure will be populated as follows:
|
||||
//
|
||||
// p.refs["ref"] = &reference{
|
||||
// link: "/url/",
|
||||
// title: "tooltip title",
|
||||
// }
|
||||
//
|
||||
// Alternatively, reference can contain information about a footnote. Consider
|
||||
// this markdown:
|
||||
//
|
||||
// Text needing a footnote.[^a]
|
||||
//
|
||||
// [^a]: This is the note
|
||||
//
|
||||
// A reference structure will be populated as follows:
|
||||
//
|
||||
// p.refs["a"] = &reference{
|
||||
// link: "a",
|
||||
// title: "This is the note",
|
||||
// noteID: <some positive int>,
|
||||
// }
|
||||
//
|
||||
// TODO: As you can see, it begs for splitting into two dedicated structures
|
||||
// for refs and for footnotes.
|
||||
type reference struct {
|
||||
link []byte
|
||||
title []byte
|
||||
noteID int // 0 if not a footnote ref
|
||||
hasBlock bool
|
||||
footnote *Node // a link to the Item node within a list of footnotes
|
||||
|
||||
text []byte // only gets populated by refOverride feature with Reference.Text
|
||||
}
|
||||
|
||||
func (r *reference) String() string {
|
||||
return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}",
|
||||
r.link, r.title, r.text, r.noteID, r.hasBlock)
|
||||
}
|
||||
|
||||
// Check whether or not data starts with a reference link.
|
||||
// If so, it is parsed and stored in the list of references
|
||||
// (in the render struct).
|
||||
// Returns the number of bytes to skip to move past it,
|
||||
// or zero if the first line is not a reference.
|
||||
func isReference(p *Markdown, data []byte, tabSize int) int {
|
||||
// up to 3 optional leading spaces
|
||||
if len(data) < 4 {
|
||||
return 0
|
||||
}
|
||||
i := 0
|
||||
for i < 3 && data[i] == ' ' {
|
||||
i++
|
||||
}
|
||||
|
||||
noteID := 0
|
||||
|
||||
// id part: anything but a newline between brackets
|
||||
if data[i] != '[' {
|
||||
return 0
|
||||
}
|
||||
i++
|
||||
if p.extensions&Footnotes != 0 {
|
||||
if i < len(data) && data[i] == '^' {
|
||||
// we can set it to anything here because the proper noteIds will
|
||||
// be assigned later during the second pass. It just has to be != 0
|
||||
noteID = 1
|
||||
i++
|
||||
}
|
||||
}
|
||||
idOffset := i
|
||||
for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
|
||||
i++
|
||||
}
|
||||
if i >= len(data) || data[i] != ']' {
|
||||
return 0
|
||||
}
|
||||
idEnd := i
|
||||
// footnotes can have empty ID, like this: [^], but a reference can not be
|
||||
// empty like this: []. Break early if it's not a footnote and there's no ID
|
||||
if noteID == 0 && idOffset == idEnd {
|
||||
return 0
|
||||
}
|
||||
// spacer: colon (space | tab)* newline? (space | tab)*
|
||||
i++
|
||||
if i >= len(data) || data[i] != ':' {
|
||||
return 0
|
||||
}
|
||||
i++
|
||||
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
|
||||
i++
|
||||
}
|
||||
if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
|
||||
i++
|
||||
if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
|
||||
i++
|
||||
}
|
||||
}
|
||||
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
|
||||
i++
|
||||
}
|
||||
if i >= len(data) {
|
||||
return 0
|
||||
}
|
||||
|
||||
var (
|
||||
linkOffset, linkEnd int
|
||||
titleOffset, titleEnd int
|
||||
lineEnd int
|
||||
raw []byte
|
||||
hasBlock bool
|
||||
)
|
||||
|
||||
if p.extensions&Footnotes != 0 && noteID != 0 {
|
||||
linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
|
||||
lineEnd = linkEnd
|
||||
} else {
|
||||
linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
|
||||
}
|
||||
if lineEnd == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// a valid ref has been found
|
||||
|
||||
ref := &reference{
|
||||
noteID: noteID,
|
||||
hasBlock: hasBlock,
|
||||
}
|
||||
|
||||
if noteID > 0 {
|
||||
// reusing the link field for the id since footnotes don't have links
|
||||
ref.link = data[idOffset:idEnd]
|
||||
// if footnote, it's not really a title, it's the contained text
|
||||
ref.title = raw
|
||||
} else {
|
||||
ref.link = data[linkOffset:linkEnd]
|
||||
ref.title = data[titleOffset:titleEnd]
|
||||
}
|
||||
|
||||
// id matches are case-insensitive
|
||||
id := string(bytes.ToLower(data[idOffset:idEnd]))
|
||||
|
||||
p.refs[id] = ref
|
||||
|
||||
return lineEnd
|
||||
}
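For reference, the kind of input this function recognizes: a plain reference definition, and, when the Footnotes extension is enabled, a footnote definition. A small sketch (ids and URL are placeholders):

```go
package mdexample

import "github.com/russross/blackfriday/v2"

func renderRefs() []byte {
	input := []byte(`See the [project site][home].

[home]: https://example.com/ "Home page"

This claim needs a note.[^n]

[^n]: And here is the note.
`)
	return blackfriday.Run(input,
		blackfriday.WithExtensions(blackfriday.CommonExtensions|blackfriday.Footnotes))
}
```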
|
||||
|
||||
func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
|
||||
// link: whitespace-free sequence, optionally between angle brackets
|
||||
if data[i] == '<' {
|
||||
i++
|
||||
}
|
||||
linkOffset = i
|
||||
for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
|
||||
i++
|
||||
}
|
||||
linkEnd = i
|
||||
if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
|
||||
linkOffset++
|
||||
linkEnd--
|
||||
}
|
||||
|
||||
// optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
|
||||
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
|
||||
i++
|
||||
}
|
||||
if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
|
||||
return
|
||||
}
|
||||
|
||||
// compute end-of-line
|
||||
if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
|
||||
lineEnd = i
|
||||
}
|
||||
if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
|
||||
lineEnd++
|
||||
}
|
||||
|
||||
// optional (space|tab)* spacer after a newline
|
||||
if lineEnd > 0 {
|
||||
i = lineEnd + 1
|
||||
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
// optional title: any non-newline sequence enclosed in '"() alone on its line
|
||||
if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
|
||||
i++
|
||||
titleOffset = i
|
||||
|
||||
// look for EOL
|
||||
for i < len(data) && data[i] != '\n' && data[i] != '\r' {
|
||||
i++
|
||||
}
|
||||
if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
|
||||
titleEnd = i + 1
|
||||
} else {
|
||||
titleEnd = i
|
||||
}
|
||||
|
||||
// step back
|
||||
i--
|
||||
for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
|
||||
i--
|
||||
}
|
||||
if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
|
||||
lineEnd = titleEnd
|
||||
titleEnd = i
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// The first bit of this logic is the same as Parser.listItem, but the rest
|
||||
// is much simpler. This function simply finds the entire block and shifts it
|
||||
// over by one tab if it is indeed a block (just returns the line if it's not).
|
||||
// blockEnd is the end of the section in the input buffer, and contents is the
|
||||
// extracted text that was shifted over one tab. It will need to be rendered at
|
||||
// the end of the document.
|
||||
func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
|
||||
if i == 0 || len(data) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// skip leading whitespace on first line
|
||||
for i < len(data) && data[i] == ' ' {
|
||||
i++
|
||||
}
|
||||
|
||||
blockStart = i
|
||||
|
||||
// find the end of the line
|
||||
blockEnd = i
|
||||
for i < len(data) && data[i-1] != '\n' {
|
||||
i++
|
||||
}
|
||||
|
||||
// get working buffer
|
||||
var raw bytes.Buffer
|
||||
|
||||
// put the first line into the working buffer
|
||||
raw.Write(data[blockEnd:i])
|
||||
blockEnd = i
|
||||
|
||||
// process the following lines
|
||||
containsBlankLine := false
|
||||
|
||||
gatherLines:
|
||||
for blockEnd < len(data) {
|
||||
i++
|
||||
|
||||
// find the end of this line
|
||||
for i < len(data) && data[i-1] != '\n' {
|
||||
i++
|
||||
}
|
||||
|
||||
// if it is an empty line, guess that it is part of this item
|
||||
// and move on to the next line
|
||||
if p.isEmpty(data[blockEnd:i]) > 0 {
|
||||
containsBlankLine = true
|
||||
blockEnd = i
|
||||
continue
|
||||
}
|
||||
|
||||
n := 0
|
||||
if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
|
||||
// this is the end of the block.
|
||||
// we don't want to include this last line in the index.
|
||||
break gatherLines
|
||||
}
|
||||
|
||||
// if there were blank lines before this one, insert a new one now
|
||||
if containsBlankLine {
|
||||
raw.WriteByte('\n')
|
||||
containsBlankLine = false
|
||||
}
|
||||
|
||||
// get rid of that first tab, write to buffer
|
||||
raw.Write(data[blockEnd+n : i])
|
||||
hasBlock = true
|
||||
|
||||
blockEnd = i
|
||||
}
|
||||
|
||||
if data[blockEnd-1] != '\n' {
|
||||
raw.WriteByte('\n')
|
||||
}
|
||||
|
||||
contents = raw.Bytes()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
//
|
||||
//
|
||||
// Miscellaneous helper functions
|
||||
//
|
||||
//
|
||||
|
||||
// Test if a character is a punctuation symbol.
// Taken from a private function in regexp in the stdlib.
func ispunct(c byte) bool {
	for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") {
		if c == r {
			return true
		}
	}
	return false
}

// Test if a character is a whitespace character.
func isspace(c byte) bool {
	return ishorizontalspace(c) || isverticalspace(c)
}

// Test if a character is a horizontal whitespace character.
func ishorizontalspace(c byte) bool {
	return c == ' ' || c == '\t'
}

// Test if a character is a vertical whitespace character.
func isverticalspace(c byte) bool {
	return c == '\n' || c == '\r' || c == '\f' || c == '\v'
}

// Test if a character is a letter.
func isletter(c byte) bool {
	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}

// Test if a character is a letter or a digit.
// TODO: check when this is looking for ASCII alnum and when it should use unicode
func isalnum(c byte) bool {
	return (c >= '0' && c <= '9') || isletter(c)
}
|
||||
|
||||
// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
|
||||
// always ends output with a newline
|
||||
func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
|
||||
// first, check for common cases: no tabs, or only tabs at beginning of line
|
||||
i, prefix := 0, 0
|
||||
slowcase := false
|
||||
for i = 0; i < len(line); i++ {
|
||||
if line[i] == '\t' {
|
||||
if prefix == i {
|
||||
prefix++
|
||||
} else {
|
||||
slowcase = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// no need to decode runes if all tabs are at the beginning of the line
|
||||
if !slowcase {
|
||||
for i = 0; i < prefix*tabSize; i++ {
|
||||
out.WriteByte(' ')
|
||||
}
|
||||
out.Write(line[prefix:])
|
||||
return
|
||||
}
|
||||
|
||||
// the slow case: we need to count runes to figure out how
|
||||
// many spaces to insert for each tab
|
||||
column := 0
|
||||
i = 0
|
||||
for i < len(line) {
|
||||
start := i
|
||||
for i < len(line) && line[i] != '\t' {
|
||||
_, size := utf8.DecodeRune(line[i:])
|
||||
i += size
|
||||
column++
|
||||
}
|
||||
|
||||
if i > start {
|
||||
out.Write(line[start:i])
|
||||
}
|
||||
|
||||
if i >= len(line) {
|
||||
break
|
||||
}
|
||||
|
||||
for {
|
||||
out.WriteByte(' ')
|
||||
column++
|
||||
if column%tabSize == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
// Find if a line counts as indented or not.
|
||||
// Returns number of characters the indent is (0 = not indented).
|
||||
func isIndented(data []byte, indentSize int) int {
|
||||
if len(data) == 0 {
|
||||
return 0
|
||||
}
|
||||
if data[0] == '\t' {
|
||||
return 1
|
||||
}
|
||||
if len(data) < indentSize {
|
||||
return 0
|
||||
}
|
||||
for i := 0; i < indentSize; i++ {
|
||||
if data[i] != ' ' {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
return indentSize
|
||||
}
|
||||
|
||||
// Create a url-safe slug for fragments
|
||||
func slugify(in []byte) []byte {
|
||||
if len(in) == 0 {
|
||||
return in
|
||||
}
|
||||
out := make([]byte, 0, len(in))
|
||||
sym := false
|
||||
|
||||
for _, ch := range in {
|
||||
if isalnum(ch) {
|
||||
sym = false
|
||||
out = append(out, ch)
|
||||
} else if sym {
|
||||
continue
|
||||
} else {
|
||||
out = append(out, '-')
|
||||
sym = true
|
||||
}
|
||||
}
|
||||
var a, b int
|
||||
var ch byte
|
||||
for a, ch = range out {
|
||||
if ch != '-' {
|
||||
break
|
||||
}
|
||||
}
|
||||
for b = len(out) - 1; b > 0; b-- {
|
||||
if out[b] != '-' {
|
||||
break
|
||||
}
|
||||
}
|
||||
return out[a : b+1]
|
||||
}
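slugify is unexported, so a worked example only makes sense in-package. A small test sketch of the trimming and collapsing behaviour read off the code above (expected values derived by hand, not from upstream tests):

```go
package blackfriday

import "testing"

func TestSlugifyExamples(t *testing.T) {
	cases := map[string]string{
		"A.B&C!":      "A-B-C",   // runs of symbols collapse to a single '-'
		"--already--": "already", // leading and trailing '-' are trimmed
	}
	for in, want := range cases {
		if got := string(slugify([]byte(in))); got != want {
			t.Errorf("slugify(%q) = %q, want %q", in, got, want)
		}
	}
}
```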
|
||||
360  man/vendor/github.com/russross/blackfriday/v2/node.go  (generated, vendored, new file)
@@ -0,0 +1,360 @@
|
||||
package blackfriday
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// NodeType specifies a type of a single node of a syntax tree. Usually one
|
||||
// node (and its type) corresponds to a single markdown feature, e.g. emphasis
|
||||
// or code block.
|
||||
type NodeType int
|
||||
|
||||
// Constants for identifying different types of nodes. See NodeType.
|
||||
const (
|
||||
Document NodeType = iota
|
||||
BlockQuote
|
||||
List
|
||||
Item
|
||||
Paragraph
|
||||
Heading
|
||||
HorizontalRule
|
||||
Emph
|
||||
Strong
|
||||
Del
|
||||
Link
|
||||
Image
|
||||
Text
|
||||
HTMLBlock
|
||||
CodeBlock
|
||||
Softbreak
|
||||
Hardbreak
|
||||
Code
|
||||
HTMLSpan
|
||||
Table
|
||||
TableCell
|
||||
TableHead
|
||||
TableBody
|
||||
TableRow
|
||||
)
|
||||
|
||||
var nodeTypeNames = []string{
|
||||
Document: "Document",
|
||||
BlockQuote: "BlockQuote",
|
||||
List: "List",
|
||||
Item: "Item",
|
||||
Paragraph: "Paragraph",
|
||||
Heading: "Heading",
|
||||
HorizontalRule: "HorizontalRule",
|
||||
Emph: "Emph",
|
||||
Strong: "Strong",
|
||||
Del: "Del",
|
||||
Link: "Link",
|
||||
Image: "Image",
|
||||
Text: "Text",
|
||||
HTMLBlock: "HTMLBlock",
|
||||
CodeBlock: "CodeBlock",
|
||||
Softbreak: "Softbreak",
|
||||
Hardbreak: "Hardbreak",
|
||||
Code: "Code",
|
||||
HTMLSpan: "HTMLSpan",
|
||||
Table: "Table",
|
||||
TableCell: "TableCell",
|
||||
TableHead: "TableHead",
|
||||
TableBody: "TableBody",
|
||||
TableRow: "TableRow",
|
||||
}
|
||||
|
||||
func (t NodeType) String() string {
|
||||
return nodeTypeNames[t]
|
||||
}
|
||||
|
||||
// ListData contains fields relevant to a List and Item node type.
|
||||
type ListData struct {
|
||||
ListFlags ListType
|
||||
Tight bool // Skip <p>s around list item data if true
|
||||
BulletChar byte // '*', '+' or '-' in bullet lists
|
||||
Delimiter byte // '.' or ')' after the number in ordered lists
|
||||
RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering
|
||||
IsFootnotesList bool // This is a list of footnotes
|
||||
}
|
||||
|
||||
// LinkData contains fields relevant to a Link node type.
|
||||
type LinkData struct {
|
||||
Destination []byte // Destination is what goes into a href
|
||||
Title []byte // Title is the tooltip thing that goes in a title attribute
|
||||
NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote
|
||||
Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil.
|
||||
}
|
||||
|
||||
// CodeBlockData contains fields relevant to a CodeBlock node type.
|
||||
type CodeBlockData struct {
|
||||
IsFenced bool // Specifies whether it's a fenced code block or an indented one
|
||||
Info []byte // This holds the info string
|
||||
FenceChar byte
|
||||
FenceLength int
|
||||
FenceOffset int
|
||||
}
|
||||
|
||||
// TableCellData contains fields relevant to a TableCell node type.
|
||||
type TableCellData struct {
|
||||
IsHeader bool // This tells if it's under the header row
|
||||
Align CellAlignFlags // This holds the value for align attribute
|
||||
}
|
||||
|
||||
// HeadingData contains fields relevant to a Heading node type.
|
||||
type HeadingData struct {
|
||||
Level int // This holds the heading level number
|
||||
HeadingID string // This might hold heading ID, if present
|
||||
IsTitleblock bool // Specifies whether it's a title block
|
||||
}
|
||||
|
||||
// Node is a single element in the abstract syntax tree of the parsed document.
|
||||
// It holds connections to the structurally neighboring nodes and, for certain
|
||||
// types of nodes, additional information that might be needed when rendering.
|
||||
type Node struct {
|
||||
Type NodeType // Determines the type of the node
|
||||
Parent *Node // Points to the parent
|
||||
FirstChild *Node // Points to the first child, if any
|
||||
LastChild *Node // Points to the last child, if any
|
||||
Prev *Node // Previous sibling; nil if it's the first child
|
||||
Next *Node // Next sibling; nil if it's the last child
|
||||
|
||||
Literal []byte // Text contents of the leaf nodes
|
||||
|
||||
HeadingData // Populated if Type is Heading
|
||||
ListData // Populated if Type is List
|
||||
CodeBlockData // Populated if Type is CodeBlock
|
||||
LinkData // Populated if Type is Link
|
||||
TableCellData // Populated if Type is TableCell
|
||||
|
||||
content []byte // Markdown content of the block nodes
|
||||
open bool // Specifies an open block node that has not been finished to process yet
|
||||
}
|
||||
|
||||
// NewNode allocates a node of a specified type.
|
||||
func NewNode(typ NodeType) *Node {
|
||||
return &Node{
|
||||
Type: typ,
|
||||
open: true,
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) String() string {
|
||||
ellipsis := ""
|
||||
snippet := n.Literal
|
||||
if len(snippet) > 16 {
|
||||
snippet = snippet[:16]
|
||||
ellipsis = "..."
|
||||
}
|
||||
return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis)
|
||||
}
|
||||
|
||||
// Unlink removes node 'n' from the tree.
|
||||
// It panics if the node is nil.
|
||||
func (n *Node) Unlink() {
|
||||
if n.Prev != nil {
|
||||
n.Prev.Next = n.Next
|
||||
} else if n.Parent != nil {
|
||||
n.Parent.FirstChild = n.Next
|
||||
}
|
||||
if n.Next != nil {
|
||||
n.Next.Prev = n.Prev
|
||||
} else if n.Parent != nil {
|
||||
n.Parent.LastChild = n.Prev
|
||||
}
|
||||
n.Parent = nil
|
||||
n.Next = nil
|
||||
n.Prev = nil
|
||||
}
|
||||
|
||||
// AppendChild adds a node 'child' as a child of 'n'.
|
||||
// It panics if either node is nil.
|
||||
func (n *Node) AppendChild(child *Node) {
|
||||
child.Unlink()
|
||||
child.Parent = n
|
||||
if n.LastChild != nil {
|
||||
n.LastChild.Next = child
|
||||
child.Prev = n.LastChild
|
||||
n.LastChild = child
|
||||
} else {
|
||||
n.FirstChild = child
|
||||
n.LastChild = child
|
||||
}
|
||||
}
|
||||
|
||||
// InsertBefore inserts 'sibling' immediately before 'n'.
|
||||
// It panics if either node is nil.
|
||||
func (n *Node) InsertBefore(sibling *Node) {
|
||||
sibling.Unlink()
|
||||
sibling.Prev = n.Prev
|
||||
if sibling.Prev != nil {
|
||||
sibling.Prev.Next = sibling
|
||||
}
|
||||
sibling.Next = n
|
||||
n.Prev = sibling
|
||||
sibling.Parent = n.Parent
|
||||
if sibling.Prev == nil {
|
||||
sibling.Parent.FirstChild = sibling
|
||||
}
|
||||
}
|
||||
|
||||
// IsContainer returns true if 'n' can contain children.
|
||||
func (n *Node) IsContainer() bool {
|
||||
switch n.Type {
|
||||
case Document:
|
||||
fallthrough
|
||||
case BlockQuote:
|
||||
fallthrough
|
||||
case List:
|
||||
fallthrough
|
||||
case Item:
|
||||
fallthrough
|
||||
case Paragraph:
|
||||
fallthrough
|
||||
case Heading:
|
||||
fallthrough
|
||||
case Emph:
|
||||
fallthrough
|
||||
case Strong:
|
||||
fallthrough
|
||||
case Del:
|
||||
fallthrough
|
||||
case Link:
|
||||
fallthrough
|
||||
case Image:
|
||||
fallthrough
|
||||
case Table:
|
||||
fallthrough
|
||||
case TableHead:
|
||||
fallthrough
|
||||
case TableBody:
|
||||
fallthrough
|
||||
case TableRow:
|
||||
fallthrough
|
||||
case TableCell:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// IsLeaf returns true if 'n' is a leaf node.
|
||||
func (n *Node) IsLeaf() bool {
|
||||
return !n.IsContainer()
|
||||
}
|
||||
|
||||
func (n *Node) canContain(t NodeType) bool {
|
||||
if n.Type == List {
|
||||
return t == Item
|
||||
}
|
||||
if n.Type == Document || n.Type == BlockQuote || n.Type == Item {
|
||||
return t != Item
|
||||
}
|
||||
if n.Type == Table {
|
||||
return t == TableHead || t == TableBody
|
||||
}
|
||||
if n.Type == TableHead || n.Type == TableBody {
|
||||
return t == TableRow
|
||||
}
|
||||
if n.Type == TableRow {
|
||||
return t == TableCell
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// WalkStatus allows NodeVisitor to have some control over the tree traversal.
|
||||
// It is returned from NodeVisitor and different values allow Node.Walk to
|
||||
// decide which node to go to next.
|
||||
type WalkStatus int
|
||||
|
||||
const (
|
||||
// GoToNext is the default traversal of every node.
|
||||
GoToNext WalkStatus = iota
|
||||
// SkipChildren tells walker to skip all children of current node.
|
||||
SkipChildren
|
||||
// Terminate tells walker to terminate the traversal.
|
||||
Terminate
|
||||
)
|
||||
|
||||
// NodeVisitor is a callback to be called when traversing the syntax tree.
|
||||
// Called twice for every node: once with entering=true when the branch is
|
||||
// first visited, then with entering=false after all the children are done.
|
||||
type NodeVisitor func(node *Node, entering bool) WalkStatus
|
||||
|
||||
// Walk is a convenience method that instantiates a walker and starts a
|
||||
// traversal of subtree rooted at n.
|
||||
func (n *Node) Walk(visitor NodeVisitor) {
|
||||
w := newNodeWalker(n)
|
||||
for w.current != nil {
|
||||
status := visitor(w.current, w.entering)
|
||||
switch status {
|
||||
case GoToNext:
|
||||
w.next()
|
||||
case SkipChildren:
|
||||
w.entering = false
|
||||
w.next()
|
||||
case Terminate:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
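The visitor's return value steers the walk: GoToNext continues, SkipChildren prunes the current subtree, Terminate stops early. A sketch that finds the first heading while ignoring anything nested in block quotes (the FirstHeading helper is an illustrative assumption):

```go
package mdexample

import "github.com/russross/blackfriday/v2"

// FirstHeading returns the first Heading node of ast that is not inside a
// block quote, or nil if there is none.
func FirstHeading(ast *blackfriday.Node) *blackfriday.Node {
	var found *blackfriday.Node
	ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		switch {
		case node.Type == blackfriday.BlockQuote:
			return blackfriday.SkipChildren // ignore quoted headings
		case entering && node.Type == blackfriday.Heading:
			found = node
			return blackfriday.Terminate
		}
		return blackfriday.GoToNext
	})
	return found
}
```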
|
||||
|
||||
type nodeWalker struct {
|
||||
current *Node
|
||||
root *Node
|
||||
entering bool
|
||||
}
|
||||
|
||||
func newNodeWalker(root *Node) *nodeWalker {
|
||||
return &nodeWalker{
|
||||
current: root,
|
||||
root: root,
|
||||
entering: true,
|
||||
}
|
||||
}
|
||||
|
||||
func (nw *nodeWalker) next() {
|
||||
if (!nw.current.IsContainer() || !nw.entering) && nw.current == nw.root {
|
||||
nw.current = nil
|
||||
return
|
||||
}
|
||||
if nw.entering && nw.current.IsContainer() {
|
||||
if nw.current.FirstChild != nil {
|
||||
nw.current = nw.current.FirstChild
|
||||
nw.entering = true
|
||||
} else {
|
||||
nw.entering = false
|
||||
}
|
||||
} else if nw.current.Next == nil {
|
||||
nw.current = nw.current.Parent
|
||||
nw.entering = false
|
||||
} else {
|
||||
nw.current = nw.current.Next
|
||||
nw.entering = true
|
||||
}
|
||||
}
|
||||
|
||||
func dump(ast *Node) {
|
||||
fmt.Println(dumpString(ast))
|
||||
}
|
||||
|
||||
func dumpR(ast *Node, depth int) string {
|
||||
if ast == nil {
|
||||
return ""
|
||||
}
|
||||
indent := bytes.Repeat([]byte("\t"), depth)
|
||||
content := ast.Literal
|
||||
if content == nil {
|
||||
content = ast.content
|
||||
}
|
||||
result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content)
|
||||
for n := ast.FirstChild; n != nil; n = n.Next {
|
||||
result += dumpR(n, depth+1)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func dumpString(ast *Node) string {
|
||||
return dumpR(ast, 0)
|
||||
}
|
||||
457  man/vendor/github.com/russross/blackfriday/v2/smartypants.go  (generated, vendored, new file)
@@ -0,0 +1,457 @@
//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//

//
//
// SmartyPants rendering
//
//

package blackfriday

import (
	"bytes"
	"io"
)

// SPRenderer is a struct containing state of a Smartypants renderer.
type SPRenderer struct {
	inSingleQuote bool
	inDoubleQuote bool
	callbacks     [256]smartCallback
}

func wordBoundary(c byte) bool {
	return c == 0 || isspace(c) || ispunct(c)
}

func tolower(c byte) byte {
	if c >= 'A' && c <= 'Z' {
		return c - 'A' + 'a'
	}
	return c
}

func isdigit(c byte) bool {
	return c >= '0' && c <= '9'
}

func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool {
	// edge of the buffer is likely to be a tag that we don't get to see,
	// so we treat it like text sometimes

	// enumerate all sixteen possibilities for (previousChar, nextChar)
	// each can be one of {0, space, punct, other}
	switch {
	case previousChar == 0 && nextChar == 0:
		// context is not any help here, so toggle
		*isOpen = !*isOpen
	case isspace(previousChar) && nextChar == 0:
		// [ "] might be [ "<code>foo...]
		*isOpen = true
	case ispunct(previousChar) && nextChar == 0:
		// [!"] hmm... could be [Run!"] or [("<code>...]
		*isOpen = false
	case /* isnormal(previousChar) && */ nextChar == 0:
		// [a"] is probably a close
		*isOpen = false
	case previousChar == 0 && isspace(nextChar):
		// [" ] might be [...foo</code>" ]
		*isOpen = false
	case isspace(previousChar) && isspace(nextChar):
		// [ " ] context is not any help here, so toggle
		*isOpen = !*isOpen
	case ispunct(previousChar) && isspace(nextChar):
		// [!" ] is probably a close
		*isOpen = false
	case /* isnormal(previousChar) && */ isspace(nextChar):
		// [a" ] this is one of the easy cases
		*isOpen = false
	case previousChar == 0 && ispunct(nextChar):
		// ["!] hmm... could be ["$1.95] or [</code>"!...]
		*isOpen = false
	case isspace(previousChar) && ispunct(nextChar):
		// [ "!] looks more like [ "$1.95]
		*isOpen = true
	case ispunct(previousChar) && ispunct(nextChar):
		// [!"!] context is not any help here, so toggle
		*isOpen = !*isOpen
	case /* isnormal(previousChar) && */ ispunct(nextChar):
		// [a"!] is probably a close
		*isOpen = false
	case previousChar == 0 /* && isnormal(nextChar) */ :
		// ["a] is probably an open
		*isOpen = true
	case isspace(previousChar) /* && isnormal(nextChar) */ :
		// [ "a] this is one of the easy cases
		*isOpen = true
	case ispunct(previousChar) /* && isnormal(nextChar) */ :
		// [!"a] is probably an open
		*isOpen = true
	default:
		// [a'b] maybe a contraction?
		*isOpen = false
	}

	// Note that with the limited lookahead, this non-breaking
	// space will also be appended to single double quotes.
	if addNBSP && !*isOpen {
		out.WriteString("&nbsp;")
	}

	out.WriteByte('&')
	if *isOpen {
		out.WriteByte('l')
	} else {
		out.WriteByte('r')
	}
	out.WriteByte(quote)
	out.WriteString("quo;")

	if addNBSP && *isOpen {
		out.WriteString("&nbsp;")
	}

	return true
}

func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
	if len(text) >= 2 {
		t1 := tolower(text[1])

		if t1 == '\'' {
			nextChar := byte(0)
			if len(text) >= 3 {
				nextChar = text[2]
			}
			if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
				return 1
			}
		}

		if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) {
			out.WriteString("&rsquo;")
			return 0
		}

		if len(text) >= 3 {
			t2 := tolower(text[2])

			if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) &&
				(len(text) < 4 || wordBoundary(text[3])) {
				out.WriteString("&rsquo;")
				return 0
			}
		}
	}

	nextChar := byte(0)
	if len(text) > 1 {
		nextChar = text[1]
	}
	if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) {
		return 0
	}

	out.WriteByte(text[0])
	return 0
}

func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int {
	if len(text) >= 3 {
		t1 := tolower(text[1])
		t2 := tolower(text[2])

		if t1 == 'c' && t2 == ')' {
			out.WriteString("&copy;")
			return 2
		}

		if t1 == 'r' && t2 == ')' {
			out.WriteString("&reg;")
			return 2
		}

		if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' {
			out.WriteString("&trade;")
			return 3
		}
	}

	out.WriteByte(text[0])
	return 0
}

func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int {
	if len(text) >= 2 {
		if text[1] == '-' {
			out.WriteString("&mdash;")
			return 1
		}

		if wordBoundary(previousChar) && wordBoundary(text[1]) {
			out.WriteString("&ndash;")
			return 0
		}
	}

	out.WriteByte(text[0])
	return 0
}

func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int {
	if len(text) >= 3 && text[1] == '-' && text[2] == '-' {
		out.WriteString("&mdash;")
		return 2
	}
	if len(text) >= 2 && text[1] == '-' {
		out.WriteString("&ndash;")
		return 1
	}

	out.WriteByte(text[0])
	return 0
}

func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int {
	if bytes.HasPrefix(text, []byte("&quot;")) {
		nextChar := byte(0)
		if len(text) >= 7 {
			nextChar = text[6]
		}
		if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) {
			return 5
		}
	}

	if bytes.HasPrefix(text, []byte("&#0;")) {
		return 3
	}

	out.WriteByte('&')
	return 0
}

func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int {
	var quote byte = 'd'
	if angledQuotes {
		quote = 'a'
	}

	return func(out *bytes.Buffer, previousChar byte, text []byte) int {
		return r.smartAmpVariant(out, previousChar, text, quote, addNBSP)
	}
}

func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int {
	if len(text) >= 3 && text[1] == '.' && text[2] == '.' {
		out.WriteString("&hellip;")
		return 2
	}

	if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' {
		out.WriteString("&hellip;")
		return 4
	}

	out.WriteByte(text[0])
	return 0
}

func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int {
	if len(text) >= 2 && text[1] == '`' {
		nextChar := byte(0)
		if len(text) >= 3 {
			nextChar = text[2]
		}
		if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
			return 1
		}
	}

	out.WriteByte(text[0])
	return 0
}

func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int {
	if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
		// is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b
		// note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8)
		// and avoid changing dates like 1/23/2005 into fractions.
		numEnd := 0
		for len(text) > numEnd && isdigit(text[numEnd]) {
			numEnd++
		}
		if numEnd == 0 {
			out.WriteByte(text[0])
			return 0
		}
		denStart := numEnd + 1
		if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 {
			denStart = numEnd + 3
		} else if len(text) < numEnd+2 || text[numEnd] != '/' {
			out.WriteByte(text[0])
			return 0
		}
		denEnd := denStart
		for len(text) > denEnd && isdigit(text[denEnd]) {
			denEnd++
		}
		if denEnd == denStart {
			out.WriteByte(text[0])
			return 0
		}
		if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' {
			out.WriteString("<sup>")
			out.Write(text[:numEnd])
			out.WriteString("</sup>&frasl;<sub>")
			out.Write(text[denStart:denEnd])
			out.WriteString("</sub>")
			return denEnd - 1
		}
	}

	out.WriteByte(text[0])
	return 0
}

func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int {
	if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
		if text[0] == '1' && text[1] == '/' && text[2] == '2' {
			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' {
				out.WriteString("&frac12;")
				return 2
			}
		}

		if text[0] == '1' && text[1] == '/' && text[2] == '4' {
			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') {
				out.WriteString("&frac14;")
				return 2
			}
		}

		if text[0] == '3' && text[1] == '/' && text[2] == '4' {
			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') {
				out.WriteString("&frac34;")
				return 2
			}
		}
	}

	out.WriteByte(text[0])
	return 0
}

func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int {
	nextChar := byte(0)
	if len(text) > 1 {
		nextChar = text[1]
	}
	if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) {
		out.WriteString("&quot;")
	}

	return 0
}

func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
	return r.smartDoubleQuoteVariant(out, previousChar, text, 'd')
}

func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
	return r.smartDoubleQuoteVariant(out, previousChar, text, 'a')
}

func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int {
	i := 0

	for i < len(text) && text[i] != '>' {
		i++
	}

	out.Write(text[:i+1])
	return i
}

type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int

// NewSmartypantsRenderer constructs a Smartypants renderer object.
func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer {
	var (
		r SPRenderer

		smartAmpAngled      = r.smartAmp(true, false)
		smartAmpAngledNBSP  = r.smartAmp(true, true)
		smartAmpRegular     = r.smartAmp(false, false)
		smartAmpRegularNBSP = r.smartAmp(false, true)

		addNBSP = flags&SmartypantsQuotesNBSP != 0
	)

	if flags&SmartypantsAngledQuotes == 0 {
		r.callbacks['"'] = r.smartDoubleQuote
		if !addNBSP {
			r.callbacks['&'] = smartAmpRegular
		} else {
			r.callbacks['&'] = smartAmpRegularNBSP
		}
	} else {
		r.callbacks['"'] = r.smartAngledDoubleQuote
		if !addNBSP {
			r.callbacks['&'] = smartAmpAngled
		} else {
			r.callbacks['&'] = smartAmpAngledNBSP
		}
	}
	r.callbacks['\''] = r.smartSingleQuote
	r.callbacks['('] = r.smartParens
	if flags&SmartypantsDashes != 0 {
		if flags&SmartypantsLatexDashes == 0 {
			r.callbacks['-'] = r.smartDash
		} else {
			r.callbacks['-'] = r.smartDashLatex
		}
	}
	r.callbacks['.'] = r.smartPeriod
	if flags&SmartypantsFractions == 0 {
		r.callbacks['1'] = r.smartNumber
		r.callbacks['3'] = r.smartNumber
	} else {
		for ch := '1'; ch <= '9'; ch++ {
			r.callbacks[ch] = r.smartNumberGeneric
		}
	}
	r.callbacks['<'] = r.smartLeftAngle
	r.callbacks['`'] = r.smartBacktick
	return &r
}

// Process is the entry point of the Smartypants renderer.
func (r *SPRenderer) Process(w io.Writer, text []byte) {
	mark := 0
	for i := 0; i < len(text); i++ {
		if action := r.callbacks[text[i]]; action != nil {
			if i > mark {
				w.Write(text[mark:i])
			}
			previousChar := byte(0)
			if i > 0 {
				previousChar = text[i-1]
			}
			var tmp bytes.Buffer
			i += action(&tmp, previousChar, text[i:])
			w.Write(tmp.Bytes())
			mark = i + 1
		}
	}
	if mark < len(text) {
		w.Write(text[mark:])
	}
}
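For reference, a minimal sketch of how the vendored SmartyPants renderer is driven; the flag combination and input text are only an illustration:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// NewSmartypantsRenderer wires up the per-byte callbacks shown above;
	// Process then rewrites quotes, dashes, ellipses and simple fractions
	// into HTML entities.
	sp := blackfriday.NewSmartypantsRenderer(blackfriday.SmartypantsDashes)

	var buf bytes.Buffer
	sp.Process(&buf, []byte(`"Half" is written 1/2 -- or so...`))
	fmt.Println(buf.String())
	// Roughly: &ldquo;Half&rdquo; is written &frac12; &mdash; or so&hellip;
}
```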
7 man/vendor/modules.txt vendored Normal file
@@ -0,0 +1,7 @@
# github.com/cpuguy83/go-md2man/v2 v2.0.4
## explicit; go 1.11
github.com/cpuguy83/go-md2man/v2
github.com/cpuguy83/go-md2man/v2/md2man
# github.com/russross/blackfriday/v2 v2.1.0
## explicit
github.com/russross/blackfriday/v2
@@ -48,7 +48,7 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os
			wo = &tar.Header{
				Typeflag:   tar.TypeReg,
				Mode:       hdr.Mode & int64(os.ModePerm),
				Name:       filepath.Join(hdr.Name, WhiteoutOpaqueDir),
				Name:       filepath.Join(hdr.Name, WhiteoutOpaqueDir), // #nosec G305 -- An archive is being created, not extracted.
				Size:       0,
				Uid:        hdr.Uid,
				Uname:      hdr.Uname,
@@ -56,7 +56,7 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os
				Gname:      hdr.Gname,
				AccessTime: hdr.AccessTime,
				ChangeTime: hdr.ChangeTime,
			} //#nosec G305 -- An archive is being created, not extracted.
			}
		}
	}
@@ -8,6 +8,8 @@ import (
	"io"
	"mime"
	"net/http"
	"net/url"
	"regexp"
	"strings"

	"github.com/containerd/log"
@@ -53,10 +55,23 @@ type Ctx struct {
	authReq *Request
}

func isChunked(r *http.Request) bool {
	// RFC 7230 specifies that content length is to be ignored if Transfer-Encoding is chunked
	if strings.EqualFold(r.Header.Get("Transfer-Encoding"), "chunked") {
		return true
	}
	for _, v := range r.TransferEncoding {
		if strings.EqualFold(v, "chunked") {
			return true
		}
	}
	return false
}

// AuthZRequest authorized the request to the docker daemon using authZ plugins
func (ctx *Ctx) AuthZRequest(w http.ResponseWriter, r *http.Request) error {
	var body []byte
	if sendBody(ctx.requestURI, r.Header) && r.ContentLength > 0 && r.ContentLength < maxBodySize {
	if sendBody(ctx.requestURI, r.Header) && (r.ContentLength > 0 || isChunked(r)) && r.ContentLength < maxBodySize {
		var err error
		body, r.Body, err = drainBody(r.Body)
		if err != nil {
@@ -109,7 +124,6 @@ func (ctx *Ctx) AuthZResponse(rm ResponseModifier, r *http.Request) error {
	if sendBody(ctx.requestURI, rm.Header()) {
		ctx.authReq.ResponseBody = rm.RawBody()
	}

	for _, plugin := range ctx.plugins {
		log.G(context.TODO()).Debugf("AuthZ response using plugin %s", plugin.Name())

@@ -147,10 +161,26 @@ func drainBody(body io.ReadCloser) ([]byte, io.ReadCloser, error) {
	return nil, newBody, err
}

func isAuthEndpoint(urlPath string) (bool, error) {
	// eg www.test.com/v1.24/auth/optional?optional1=something&optional2=something (version optional)
	matched, err := regexp.MatchString(`^[^\/]*\/(v\d[\d\.]*\/)?auth.*`, urlPath)
	if err != nil {
		return false, err
	}
	return matched, nil
}

// sendBody returns true when request/response body should be sent to AuthZPlugin
func sendBody(url string, header http.Header) bool {
func sendBody(inURL string, header http.Header) bool {
	u, err := url.Parse(inURL)
	// Assume no if the URL cannot be parsed - an empty request will still be forwarded to the plugin and should be rejected
	if err != nil {
		return false
	}

	// Skip body for auth endpoint
	if strings.HasSuffix(url, "/auth") {
	isAuth, err := isAuthEndpoint(u.Path)
	if isAuth || err != nil {
		return false
	}
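The `isChunked` helper above matters because `net/http` reports `ContentLength == -1` for chunked requests, so the old `ContentLength > 0` guard silently skipped the body. A self-contained sketch of the same check (the helper is re-implemented here for illustration, and the request path is made up):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

// isChunked mirrors the helper added in the hunk above: a request counts as
// chunked if either the raw Transfer-Encoding header or the parsed
// TransferEncoding slice says so.
func isChunked(r *http.Request) bool {
	if strings.EqualFold(r.Header.Get("Transfer-Encoding"), "chunked") {
		return true
	}
	for _, v := range r.TransferEncoding {
		if strings.EqualFold(v, "chunked") {
			return true
		}
	}
	return false
}

func main() {
	req := httptest.NewRequest(http.MethodPost, "/v1.44/containers/create", strings.NewReader(`{}`))
	req.TransferEncoding = []string{"chunked"}
	req.ContentLength = -1 // what net/http reports when the length is unknown

	// Old guard: ContentLength > 0            -> body skipped
	// New guard: ContentLength > 0 || chunked -> body forwarded to the plugin
	fmt.Println(isChunked(req), req.ContentLength > 0) // true false
}
```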
@@ -174,8 +174,8 @@ func TestDrainBody(t *testing.T) {

func TestSendBody(t *testing.T) {
	var (
		url       = "nothing.com"
		testcases = []struct {
			url         string
			contentType string
			expected    bool
		}{
@@ -219,15 +219,93 @@ func TestSendBody(t *testing.T) {
				contentType: "",
				expected:    false,
			},
			{
				url:         "nothing.com/auth",
				contentType: "",
				expected:    false,
			},
			{
				url:         "nothing.com/auth",
				contentType: "application/json;charset=UTF8",
				expected:    false,
			},
			{
				url:         "nothing.com/auth?p1=test",
				contentType: "application/json;charset=UTF8",
				expected:    false,
			},
			{
				url:         "nothing.com/test?p1=/auth",
				contentType: "application/json;charset=UTF8",
				expected:    true,
			},
			{
				url:         "nothing.com/something/auth",
				contentType: "application/json;charset=UTF8",
				expected:    true,
			},
			{
				url:         "nothing.com/auth/test",
				contentType: "application/json;charset=UTF8",
				expected:    false,
			},
			{
				url:         "nothing.com/v1.24/auth/test",
				contentType: "application/json;charset=UTF8",
				expected:    false,
			},
			{
				url:         "nothing.com/v1/auth/test",
				contentType: "application/json;charset=UTF8",
				expected:    false,
			},
			{
				url:         "www.nothing.com/v1.24/auth/test",
				contentType: "application/json;charset=UTF8",
				expected:    false,
			},
			{
				url:         "https://www.nothing.com/v1.24/auth/test",
				contentType: "application/json;charset=UTF8",
				expected:    false,
			},
			{
				url:         "http://nothing.com/v1.24/auth/test",
				contentType: "application/json;charset=UTF8",
				expected:    false,
			},
			{
				url:         "www.nothing.com/test?p1=/auth",
				contentType: "application/json;charset=UTF8",
				expected:    true,
			},
			{
				url:         "http://www.nothing.com/test?p1=/auth",
				contentType: "application/json;charset=UTF8",
				expected:    true,
			},
			{
				url:         "www.nothing.com/something/auth",
				contentType: "application/json;charset=UTF8",
				expected:    true,
			},
			{
				url:         "https://www.nothing.com/something/auth",
				contentType: "application/json;charset=UTF8",
				expected:    true,
			},
		}
	)

	for _, testcase := range testcases {
		header := http.Header{}
		header.Set("Content-Type", testcase.contentType)
		if testcase.url == "" {
			testcase.url = "nothing.com"
		}

		if b := sendBody(url, header); b != testcase.expected {
			t.Fatalf("Unexpected Content-Type; Expected: %t, Actual: %t", testcase.expected, b)
		if b := sendBody(testcase.url, header); b != testcase.expected {
			t.Fatalf("sendBody failed: url: %s, content-type: %s; Expected: %t, Actual: %t", testcase.url, testcase.contentType, testcase.expected, b)
		}
	}
}
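The test cases above exercise the path-based `isAuthEndpoint` check rather than the old `strings.HasSuffix(url, "/auth")` test. A standalone sketch of the same regular expression against a few of those URLs (illustrative only):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Pattern from isAuthEndpoint: an optional "vN[.N]" API-version segment
	// followed by "auth" right after the first path separator.
	const pattern = `^[^\/]*\/(v\d[\d\.]*\/)?auth.*`

	for _, p := range []string{
		"nothing.com/auth",            // auth endpoint: body is not sent
		"nothing.com/v1.24/auth/test", // auth endpoint: body is not sent
		"nothing.com/something/auth",  // not an auth endpoint: body is sent
	} {
		matched, _ := regexp.MatchString(pattern, p)
		fmt.Printf("%-30s %v\n", p, matched)
	}
}
```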
@@ -25,6 +25,10 @@ profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
  umount,
  # Host (privileged) processes may send signals to container processes.
  signal (receive) peer=unconfined,
  # runc may send signals to container processes (for "docker stop").
  signal (receive) peer=runc,
  # crun may send signals to container processes (for "docker stop" when used with crun OCI runtime).
  signal (receive) peer=crun,
  # dockerd may send signals to container processes (for "docker kill").
  signal (receive) peer={{.DaemonProfile}},
  # Container processes may send signals amongst themselves.
@@ -39,6 +39,7 @@ var nativeToSeccomp = map[string]specs.Arch{
	"ppc":     specs.ArchPPC,
	"ppc64":   specs.ArchPPC64,
	"ppc64le": specs.ArchPPC64LE,
	"riscv64": specs.ArchRISCV64,
	"s390":    specs.ArchS390,
	"s390x":   specs.ArchS390X,
}
@@ -57,6 +58,7 @@ var goToNative = map[string]string{
	"ppc":     "ppc",
	"ppc64":   "ppc64",
	"ppc64le": "ppc64le",
	"riscv64": "riscv64",
	"s390":    "s390",
	"s390x":   "s390x",
}
94 vendor.mod
@@ -4,25 +4,25 @@

module github.com/docker/docker

go 1.20
go 1.21

require (
	cloud.google.com/go/compute/metadata v0.2.3
	cloud.google.com/go/logging v1.7.0
	cloud.google.com/go/logging v1.8.1
	code.cloudfoundry.org/clock v1.1.0
	dario.cat/mergo v1.0.0
	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24
	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1
	github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0
	github.com/Microsoft/go-winio v0.6.1
	github.com/Microsoft/go-winio v0.6.2
	github.com/Microsoft/hcsshim v0.11.4
	github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91
	github.com/aws/aws-sdk-go-v2 v1.17.6
	github.com/aws/aws-sdk-go-v2/config v1.18.16
	github.com/aws/aws-sdk-go-v2/credentials v1.13.16
	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.24
	github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.15.17
	github.com/aws/smithy-go v1.13.5
	github.com/aws/aws-sdk-go-v2 v1.24.1
	github.com/aws/aws-sdk-go-v2/config v1.26.6
	github.com/aws/aws-sdk-go-v2/credentials v1.16.16
	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11
	github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.32.0
	github.com/aws/smithy-go v1.19.0
	github.com/cloudflare/cfssl v1.6.4
	github.com/containerd/cgroups/v3 v3.0.3
	github.com/containerd/containerd v1.7.12
@@ -45,7 +45,7 @@ require (
	github.com/godbus/dbus/v5 v5.1.0
	github.com/gogo/protobuf v1.3.2
	github.com/golang/gddo v0.0.0-20190904175337-72a348e765d2
	github.com/golang/protobuf v1.5.3
	github.com/golang/protobuf v1.5.4
	github.com/google/go-cmp v0.6.0
	github.com/google/uuid v1.5.0
	github.com/gorilla/mux v1.8.1
@@ -91,42 +91,45 @@ require (
	github.com/vishvananda/netlink v1.2.1-beta.2
	github.com/vishvananda/netns v0.0.4
	go.etcd.io/bbolt v1.3.7
	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0
	go.opentelemetry.io/otel v1.19.0
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0
	go.opentelemetry.io/otel/sdk v1.19.0
	go.opentelemetry.io/otel/trace v1.19.0
	golang.org/x/mod v0.11.0
	golang.org/x/net v0.17.0
	golang.org/x/sync v0.3.0
	golang.org/x/sys v0.16.0
	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1
	go.opentelemetry.io/otel v1.21.0
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0
	go.opentelemetry.io/otel/metric v1.21.0
	go.opentelemetry.io/otel/sdk v1.21.0
	go.opentelemetry.io/otel/trace v1.21.0
	golang.org/x/mod v0.17.0
	golang.org/x/net v0.23.0
	golang.org/x/sync v0.5.0
	golang.org/x/sys v0.18.0
	golang.org/x/text v0.14.0
	golang.org/x/time v0.3.0
	google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98
	google.golang.org/grpc v1.58.3
	google.golang.org/protobuf v1.31.0
	google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b
	google.golang.org/grpc v1.59.0
	google.golang.org/protobuf v1.33.0
	gotest.tools/v3 v3.5.1
	resenje.org/singleflight v0.4.1
	tags.cncf.io/container-device-interface v0.6.2
)

require (
	cloud.google.com/go v0.110.4 // indirect
	cloud.google.com/go/compute v1.21.0 // indirect
	cloud.google.com/go v0.110.8 // indirect
	cloud.google.com/go/compute v1.23.0 // indirect
	cloud.google.com/go/longrunning v0.5.1 // indirect
	github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect
	github.com/agext/levenshtein v1.2.3 // indirect
	github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect
	github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 // indirect
	github.com/armon/go-metrics v0.4.1 // indirect
	github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30 // indirect
	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24 // indirect
	github.com/aws/aws-sdk-go-v2/internal/ini v1.3.31 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.24 // indirect
	github.com/aws/aws-sdk-go-v2/service/sso v1.12.5 // indirect
	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.5 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.18.6 // indirect
	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect
	github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect
	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 // indirect
	github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 // indirect
	github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 // indirect
	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
@@ -146,7 +149,7 @@ require (
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee // indirect
	github.com/fsnotify/fsnotify v1.6.0 // indirect
	github.com/go-logr/logr v1.2.4 // indirect
	github.com/go-logr/logr v1.3.0 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/gofrs/flock v0.8.1 // indirect
	github.com/gogo/googleapis v1.4.1 // indirect
@@ -156,8 +159,8 @@ require (
	github.com/google/certificate-transparency-go v1.1.4 // indirect
	github.com/google/s2a-go v0.1.4 // indirect
	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
	github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
	github.com/googleapis/gax-go/v2 v2.11.0 // indirect
	github.com/googleapis/enterprise-certificate-proxy v0.2.4 // indirect
	github.com/googleapis/gax-go/v2 v2.12.0 // indirect
	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -194,22 +197,21 @@ require (
	go.etcd.io/etcd/raft/v3 v3.5.6 // indirect
	go.etcd.io/etcd/server/v3 v3.5.6 // indirect
	go.opencensus.io v0.24.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.45.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect
	go.opentelemetry.io/otel/metric v1.19.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.46.1 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect
	go.opentelemetry.io/proto/otlp v1.0.0 // indirect
	go.uber.org/atomic v1.9.0 // indirect
	go.uber.org/multierr v1.8.0 // indirect
	go.uber.org/zap v1.21.0 // indirect
	golang.org/x/crypto v0.17.0 // indirect
	golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 // indirect
	golang.org/x/oauth2 v0.10.0 // indirect
	golang.org/x/tools v0.10.0 // indirect
	google.golang.org/api v0.126.0 // indirect
	golang.org/x/crypto v0.21.0 // indirect
	golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect
	golang.org/x/oauth2 v0.11.0 // indirect
	golang.org/x/tools v0.16.0 // indirect
	google.golang.org/api v0.128.0 // indirect
	google.golang.org/appengine v1.6.7 // indirect
	google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect
	google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	k8s.io/klog/v2 v2.90.1 // indirect
	sigs.k8s.io/yaml v1.3.0 // indirect
223
vendor.sum
223
vendor.sum
@@ -19,23 +19,24 @@ cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6
|
||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.110.4 h1:1JYyxKMN9hd5dR2MYTPWkGUgcoxVVhg0LKNKEo0qvmk=
|
||||
cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI=
|
||||
cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME=
|
||||
cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/compute v1.21.0 h1:JNBsyXVoOoNJtTQcnEY5uYpZIbeCTYIeDe0Xh1bySMk=
|
||||
cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
|
||||
cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
|
||||
cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
|
||||
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
|
||||
cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y=
|
||||
cloud.google.com/go/logging v1.7.0 h1:CJYxlNNNNAMkHp9em/YEXcfJg+rPDg7YfwoRpMU+t5I=
|
||||
cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M=
|
||||
cloud.google.com/go/iam v1.1.2 h1:gacbrBdWcoVmGLozRuStX45YKvJtzIjJdAolzUs1sm4=
|
||||
cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU=
|
||||
cloud.google.com/go/logging v1.8.1 h1:26skQWPeYhvIasWKm48+Eq7oUqdcdbwsCVwz5Ys0FvU=
|
||||
cloud.google.com/go/logging v1.8.1/go.mod h1:TJjR+SimHwuC8MZ9cjByQulAMgni+RkXeI3wwctHJEI=
|
||||
cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI=
|
||||
cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
@@ -119,12 +120,13 @@ github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF0
|
||||
github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
|
||||
github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
|
||||
github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
|
||||
github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
|
||||
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
|
||||
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
|
||||
github.com/Microsoft/go-winio v0.4.15-0.20200908182639-5b44b70ab3ab/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
|
||||
github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
|
||||
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
|
||||
github.com/Microsoft/hcsshim v0.8.10/go.mod h1:g5uw8EV2mAlzqe94tfNBNdr89fnbD/n3HV0OhsddkmM=
|
||||
@@ -147,7 +149,9 @@ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrU
|
||||
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
|
||||
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
|
||||
github.com/akutz/gosync v0.1.0 h1:naxPT/aDYDh79PMwM3XmencmNQeYmpNFSZy4ZE9zIW0=
|
||||
github.com/akutz/gosync v0.1.0/go.mod h1:I8I4aiqJI1nqaeYOOB1WS+CgRJVVPqhct9Y4njywM84=
|
||||
github.com/akutz/memconn v0.1.0 h1:NawI0TORU4hcOMsMr11g7vwlCdkYeLKXBcxWu2W/P8A=
|
||||
github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw=
|
||||
github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
@@ -182,36 +186,36 @@ github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
|
||||
github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.27.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.31.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
|
||||
github.com/aws/aws-sdk-go-v2 v1.16.13/go.mod h1:xSyvSnzh0KLs5H4HJGeIEsNYemUWdNIl0b/rP6SIsLU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.17.6 h1:Y773UK7OBqhzi5VDXMi1zVGsoj+CVHs2eaC2bDsLwi0=
|
||||
github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.16 h1:4r7gsCu8Ekwl5iJGE/GmspA2UifqySCCkyyyPFeWs3w=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.16/go.mod h1:XjM6lVbq7UgELp9NjXBrb1DQY/ownlWsvDhEQksemJc=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.16 h1:GgToSxaENX/1zXIGNFfiVk4hxryYJ5Vt4Mh8XLAL7Lc=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.16/go.mod h1:KP7aFJhfwPFgx9aoVYL2nYHjya5WBD98CWaadpgmnpY=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.24 h1:5qyqXASrX2zy5cTnoHHa4N2c3Lc94GH7gjnBP3GwKdU=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.24/go.mod h1:neYVaeKr5eT7BzwULuG2YbLhzWZ22lpjKdCybR7AXrQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.20/go.mod h1:gdZ5gRUaxThXIZyZQ8MTtgYBk2jbHgp05BO3GcD9Cwc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30 h1:y+8n9AGDjikyXoMBTRaHHHSaFEB8267ykmvyPodJfys=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30/go.mod h1:LUBAO3zNXQjoONBKn/kR1y0Q4cj/D02Ts0uHYjcCQLM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.14/go.mod h1:GEV9jaDPIgayiU+uevxwozcvUOjc+P4aHE2BeSjm2vE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24 h1:r+Kv+SEJquhAZXaJ7G4u44cIwXV3f8K+N482NNAzJZA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24/go.mod h1:gAuCezX/gob6BSMbItsSlMb6WZGV7K2+fWOvk8xBSto=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.31 h1:hf+Vhp5WtTdcSdE+yEcUz8L73sAzN0R+0jQv+Z51/mI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.31/go.mod h1:5zUjguZfG5qjhG9/wqmuyHRyUftl2B5Cp6NNxNC6kRA=
|
||||
github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.15.17 h1:cDudPvUMS1LzoXgwhAVqUoaOK3PY7oCSL4pGmQmxlSk=
|
||||
github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.15.17/go.mod h1:60NdwPCecURV9rIq6Hg8U4kOsKsz1aXvAAYNKlhG9+E=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.24 h1:c5qGfdbCHav6viBwiyDns3OXqhqAbGjfIB4uVu2ayhk=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.24/go.mod h1:HMA4FZG6fyib+NDo5bpIxX1EhYjrAOveZJY2YR0xrNE=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.5 h1:bdKIX6SVF3nc3xJFw6Nf0igzS6Ff/louGq8Z6VP/3Hs=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.5/go.mod h1:vuWiaDB30M/QTC+lI3Wj6S/zb7tpUK2MSYgy3Guh2L0=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.5 h1:xLPZMyuZ4GuqRCIec/zWuIhRFPXh2UOJdLXBSi64ZWQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.5/go.mod h1:QjxpHmCwAg0ESGtPQnLIVp7SedTOBMYy+Slr3IfMKeI=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.18.6 h1:rIFn5J3yDoeuKCE9sESXqM5POTAhOP1du3bv/qTL+tE=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.18.6/go.mod h1:48WJ9l3dwP0GSHWGc5sFGGlCkuA82Mc2xnw+T6Q8aDw=
|
||||
github.com/aws/smithy-go v1.13.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
|
||||
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||
github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4/go.mod h1:usURWEKSNNAcAZuzRn/9ZYPT8aZQkR7xcCtunK/LkJo=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.26.6 h1:Z/7w9bUqlRI0FFQpetVuFYEsjzE3h7fpU6HuGmfPL/o=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.26.6/go.mod h1:uKU6cnDmYCvJ+pxO9S4cWDb2yWWIH5hra+32hVh1MI4=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 h1:n3GDfwqF2tzEkXlv5cuy4iy7LpKDtqDMcNLfZDu9rls=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
|
||||
github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.32.0 h1:VdKYfVPIDzmfSQk5gOQ5uueKiuKMkJuB/KOXmQ9Ytag=
|
||||
github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.32.0/go.mod h1:jZNaJEtn9TLi3pfxycLz79HVkKxP8ZdYm92iaNFgBsA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U=
|
||||
github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM=
|
||||
github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
|
||||
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
@@ -276,6 +280,7 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
|
||||
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
|
||||
@@ -285,6 +290,7 @@ github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOi
|
||||
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
|
||||
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
|
||||
github.com/container-storage-interface/spec v1.5.0 h1:lvKxe3uLgqQeVQcrnL2CPQKISoKjTJxojEs9cBk+HXo=
|
||||
github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s=
|
||||
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
|
||||
@@ -443,6 +449,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m
|
||||
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
|
||||
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
@@ -457,6 +464,7 @@ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI
|
||||
github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
|
||||
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
|
||||
github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA=
|
||||
github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
@@ -484,8 +492,8 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7
|
||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
|
||||
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
|
||||
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
|
||||
@@ -506,6 +514,7 @@ github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LB
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
|
||||
github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
|
||||
github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
|
||||
github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
|
||||
@@ -548,7 +557,8 @@ github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w
|
||||
github.com/golang/gddo v0.0.0-20190904175337-72a348e765d2 h1:xisWqjiKEff2B0KfFYGpCqc3M3zdTz+OHQHRc09FeYk=
|
||||
github.com/golang/gddo v0.0.0-20190904175337-72a348e765d2/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
|
||||
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
|
||||
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
@@ -581,8 +591,8 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
|
||||
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
|
||||
@@ -622,7 +632,6 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
@@ -663,14 +672,14 @@ github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
|
||||
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s=
|
||||
github.com/google/wire v0.4.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.4 h1:uGy6JWR/uMIILU8wbf+OkstIrNiMjGpEIyhx8f6W7s4=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.4/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4=
|
||||
github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI=
|
||||
github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
|
||||
github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
|
||||
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
|
||||
@@ -788,8 +797,6 @@ github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
|
||||
github.com/jmoiron/sqlx v1.3.3 h1:j82X0bf7oQ27XeqxicSZsTU5suPwKElg3oyxNn43iTk=
|
||||
github.com/jmoiron/sqlx v1.3.3/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
|
||||
@@ -829,6 +836,7 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
|
||||
@@ -972,8 +980,10 @@ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
|
||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
|
||||
github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU=
|
||||
github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
@@ -984,6 +994,7 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
|
||||
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
|
||||
github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
@@ -1032,6 +1043,7 @@ github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee h1:P6U24L02WMfj9ymZTxl7CxS73JC99x3ukk+DBkgQGQs=
github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee/go.mod h1:3uODdxMgOaPYeWU7RzZLxVtJHZ/x1f/iHkBZuKJDzuY=
github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
@@ -1098,12 +1110,14 @@ github.com/quasilyte/go-ruleguard v0.1.2-0.20200318202121-b00d7a75d3d8/go.mod h1
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
github.com/rexray/gocsi v1.2.2 h1:h9F/eSizORihN+XT+mxhq7ClZ3cYo1L9RvasN6dKz8U=
github.com/rexray/gocsi v1.2.2/go.mod h1:X9oJHHpIVGmfKdK8e+JuCXafggk7HxL9mWQOgrsoHpo=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rootless-containers/rootlesskit/v2 v2.0.0 h1:oAtnD6sgsNmdxXCm5zont5nEYTAkSqExmQOalZYiYJM=
github.com/rootless-containers/rootlesskit/v2 v2.0.0/go.mod h1:G7x0sK6onoLhFYZahpSsM/HaWEAh664SK5nCeQs4MQE=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
@@ -1201,6 +1215,7 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -1273,6 +1288,7 @@ github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfD
github.com/xanzy/go-gitlab v0.32.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
@@ -1326,31 +1342,31 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 h1:RsQi0qJ2imFfCvZabqzM9cNXBG8k6gXMv1A0cXRmH6A=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0/go.mod h1:vsh3ySueQCiKPxFLvjWC4Z135gIa34TQ/NSqkDTZYUM=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.45.0 h1:2ea0IkZBsWH+HA2GkD+7+hRw2u97jzdFyRtXuO14a1s=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.45.0/go.mod h1:4m3RnBBb+7dB9d21y510oO1pdB1V4J6smNf14WXcBFQ=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.46.1 h1:gbhw/u49SS3gkPWiYweQNJGm/uJN5GkI/FrosxSHT7A=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.46.1/go.mod h1:GnOaBaFQ2we3b9AGWJpsBa7v1S5RlQzlC3O7dRMxZhM=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU=
go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I=
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI=
go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A=
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk=
go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
@@ -1361,7 +1377,8 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
@@ -1395,8 +1412,8 @@ golang.org/x/crypto v0.0.0-20201124201722-c8d3bf9c5392/go.mod h1:jdWPYTVW3xRLrWP
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1409,8 +1426,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 h1:Jvc7gsqn21cJHCmAWx0LiimpP18LZmUxkT5Mp7EZ1mI=
golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1437,8 +1454,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=
golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1492,8 +1509,8 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1504,8 +1521,8 @@ golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1520,8 +1537,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1615,13 +1632,14 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1720,8 +1738,8 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg=
golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM=
golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM=
golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1752,8 +1770,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o=
google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw=
google.golang.org/api v0.128.0 h1:RjPESny5CnQRn9V6siglged+DZCgfu9l6mO9dkX9VOg=
google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1800,12 +1818,12 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g=
google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0=
google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw=
google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a h1:fwgW9j3vHirt4ObdHoYNwuO24BEZjSzbh+zPaNWoiY8=
google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:EMfReVxb80Dq1hhioy0sOsY9jCE46YDgHlJ7fWVUWRE=
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k=
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a h1:a2MQQVoTo96JC9PMGtGBymLp7+/RzpFc2yX/9WfFg1c=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
@@ -1830,8 +1848,8 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1845,8 +1863,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1855,6 +1873,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@@ -1932,7 +1951,9 @@ k8s.io/legacy-cloud-providers v0.17.4/go.mod h1:FikRNoD64ECjkxO36gkDgJeiQWwyZTuB
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
kernel.org/pub/linux/libs/security/libcap/cap v1.2.67 h1:sPQ9qlSNR26fToTKbxe/HDWJlXvBLqGmt84LGCQkOy0=
kernel.org/pub/linux/libs/security/libcap/cap v1.2.67/go.mod h1:GkntoBuwffz19qtdFVB+k2NtWNN+yCKnC/Ykv/hMiTU=
kernel.org/pub/linux/libs/security/libcap/psx v1.2.67 h1:NxbXJ7pDVq0FKBsqjieT92QDXI2XaqH2HAi4QcCOHt8=
kernel.org/pub/linux/libs/security/libcap/psx v1.2.67/go.mod h1:+l6Ee2F59XiJ2I6WR5ObpC1utCQJZ/VLsEbQCD8RG24=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
17
vendor/cloud.google.com/go/.release-please-manifest-individual.json
generated
vendored
@@ -1,13 +1,14 @@
{
"bigquery": "1.52.0",
"bigtable": "1.18.1",
"datastore": "1.12.0",
"auth": "0.0.0",
"bigquery": "1.55.0",
"bigtable": "1.19.0",
"datastore": "1.14.0",
"errorreporting": "0.3.0",
"firestore": "1.11.0",
"logging": "1.7.0",
"firestore": "1.12.0",
"logging": "1.8.1",
"profiler": "0.3.1",
"pubsub": "1.32.0",
"pubsub": "1.33.0",
"pubsublite": "1.8.1",
"spanner": "1.47.0",
"storage": "1.31.0"
"spanner": "1.49.0",
"storage": "1.33.0"
}
Some files were not shown because too many files have changed in this diff