Compare commits


34 Commits

Author SHA1 Message Date
Victor Vieux
4845c567eb bump to 17.04.0 GA
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-04-03 10:15:25 -07:00
Victor Vieux
30686111ad install: map debian_version 9.X to debian stretch without lsb_release
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
(cherry picked from commit f8360d002d)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-03-29 13:47:04 -07:00
Victor Vieux
2f35d73b7d bump to rc2
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-03-29 00:16:40 -07:00
Victor Vieux
d408b829fb Merge pull request #32190 from vieux/cherry-picks-17.04-rc2
cherry picks 17.04.0-rc2
2017-03-28 23:50:27 -07:00
Tonis Tiigi
164f1d362e Return proper exit code on builder panic
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
(cherry picked from commit 385f1174ff)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-03-28 17:34:11 -07:00
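The shape of that fix, as a minimal Go sketch: recover the panic at the handler boundary and map it to a deterministic non-zero exit code instead of crashing with an uncontrolled status. runBuild here is a hypothetical stand-in, not moby's actual builder entry point:

```go
package main

import (
	"fmt"
	"os"
)

func runBuild() (err error) {
	defer func() {
		if r := recover(); r != nil {
			// Convert the panic into an ordinary error so the caller
			// can map it to an exit code.
			err = fmt.Errorf("builder panicked: %v", r)
		}
	}()
	panic("boom") // stand-in for a bug inside the builder
}

func main() {
	if err := runBuild(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1) // proper non-zero exit code
	}
}
```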
French Ben
b82fe694d4 docs: added support for CLI yaml file generation
Signed-off-by: French Ben <frenchben@docker.com>
Signed-off-by: Tibor Vass <tibor@docker.com>
(cherry picked from commit 5443f0152f)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-03-28 17:29:36 -07:00
Jim Minter
9c05b1049f Resolve connection reset by peer regression
Signed-off-by: Jim Minter <jminter@redhat.com>
(cherry picked from commit dc0ee98805)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-03-28 17:26:22 -07:00
Vincent Demeester
fbd3d64212 Replace API version comparison with an if, as we don't know for sure what API version it will be at that time.

Signed-off-by: Vincent Demeester <vincent@sbr.pm>
(cherry picked from commit 0f9d22cd66)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-03-28 17:26:12 -07:00
Victor Vieux
c97450059d Revert "Planned 1.13 deprecation: email from login"
This reverts commit a66efbddb8.

Signed-off-by: Victor Vieux <victorvieux@gmail.com>
(cherry picked from commit 4bce232139)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-03-28 17:26:02 -07:00
Sebastiaan van Stijn
81075fa86c update "docker daemon" deprecation message for new version scheme
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit ba76a0c912)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-03-28 17:24:53 -07:00
Steve Durrheimer
76c8cbd1fc Add zsh completion for 'docker build --add-host'
Signed-off-by: Steve Durrheimer <s.durrheimer@gmail.com>
(cherry picked from commit fbedc588be)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-03-28 17:24:44 -07:00
Manjunath A Kumatagi
62581bb11c Fix Healthcheck API doc
Signed-off-by: Manjunath A Kumatagi <mkumatag@in.ibm.com>
(cherry picked from commit 4df54695ef)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-03-28 17:24:35 -07:00
Vincent Demeester
101369979a Add cli reference docs for root-level commands
Signed-off-by: Vincent Demeester <vincent@sbr.pm>
(cherry picked from commit 9cea26bc77)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-03-28 17:24:23 -07:00
Vincent Demeester
b721d55399 Set the alias to the service name instead of the network name
This makes it work a little closer to Compose, and it is more correct 👼

Signed-off-by: Vincent Demeester <vincent@sbr.pm>
(cherry picked from commit ee08c8542a)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-03-28 17:24:07 -07:00
Darren Stahl
6ddedae595 Windows: Revert change to wait for OOBE
Signed-off-by: Darren Stahl <darst@microsoft.com>
(cherry picked from commit 6eed7f0cac)
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-03-28 17:23:58 -07:00
Victor Vieux
73b512220d Merge pull request #31977 from cyli/bump-go-connections-17.04
Bump go connections for 17.04 and use either system pool or custom CA pool when connecting from client->daemon [17.04]
2017-03-23 17:07:28 -07:00
Victor Vieux
5550606c15 Merge pull request #32033 from AkihiroSuda/cherrypick-30781
[17.04] compose: fix environment interpolation from the client
2017-03-23 10:10:42 -07:00
Akihiro Suda
fac9e24c7c compose: update the comment about MappingWithEquals
Signed-off-by: Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
2017-03-23 02:09:26 +00:00
Daniel Nephin
7a4482a144 Fix environment resolving.
Load from env should only happen if the value is unset.
Extract a buildEnvironment function and revert some changes to tests.

Signed-off-by: Daniel Nephin <dnephin@docker.com>
2017-03-23 02:09:26 +00:00
Akihiro Suda
88c18fb45c compose: fix environment interpolation from the client
For an environment variable defined in the yaml without value,
the value needs to be propagated from the client, as in Docker Compose.

Signed-off-by: Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
2017-03-23 02:09:26 +00:00
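The two compose commits above describe the same rule: an environment variable listed in the YAML without a value is filled in from the client's own environment, and only when it has no explicit value. A minimal Go sketch of that resolution logic, with buildEnvironment as a hypothetical helper rather than the exact docker/cli function:

```go
package main

import (
	"fmt"
	"os"
)

// buildEnvironment resolves service environment variables: entries with a
// nil value (written as "- FOO" in the YAML) are looked up in the client's
// own environment; entries with an explicit value are kept as-is.
func buildEnvironment(defined map[string]*string) map[string]string {
	resolved := make(map[string]string, len(defined))
	for name, value := range defined {
		if value != nil {
			resolved[name] = *value // explicit value wins
			continue
		}
		if v, ok := os.LookupEnv(name); ok {
			resolved[name] = v // propagate from the client, as in Compose
		}
	}
	return resolved
}

func main() {
	os.Setenv("FOO", "from-client")
	bar := "explicit"
	env := buildEnvironment(map[string]*string{"FOO": nil, "BAR": &bar})
	fmt.Println(env) // map[BAR:explicit FOO:from-client]
}
```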
Sebastiaan van Stijn
94c4b33730 Merge pull request #31970 from vdemeester/cherry-pick-31621
[17.04] Cherry-pick: Windows: Fix Docker hanging with named pipes and stdin
2017-03-21 21:20:49 +01:00
Ying Li
60d17202f0 Use either the system root pool or an empty cert pool with custom CA roots,
and not a joint system+custom CA roots pool, when connecting from a docker
client to a remote daemon

Signed-off-by: Ying Li <ying.li@docker.com>
2017-03-21 10:54:56 -07:00
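A minimal Go sketch of the pool selection described above, using the standard crypto/x509 API: either the system roots (no custom CA configured) or an empty pool populated only with the custom CA, never a merged system+custom pool. The caFile parameter and surrounding TLS setup are illustrative:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"os"
)

func rootPool(caFile string) (*x509.CertPool, error) {
	if caFile == "" {
		// No custom CA configured: trust the system roots.
		return x509.SystemCertPool()
	}
	// Custom CA configured: start from an EMPTY pool so the system
	// roots are not silently trusted alongside it.
	pem, err := os.ReadFile(caFile)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pem) {
		return nil, fmt.Errorf("no certificates parsed from %s", caFile)
	}
	return pool, nil
}

func main() {
	pool, err := rootPool("") // or "ca.pem" for a custom CA
	if err != nil {
		panic(err)
	}
	cfg := &tls.Config{RootCAs: pool}
	fmt.Println("client TLS config ready:", cfg.RootCAs != nil)
}
```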
Ying Li
3c05ff4aef Bump the version of go-connections
Signed-off-by: Ying Li <ying.li@docker.com>
2017-03-21 10:40:39 -07:00
David Gageot
f3cc477e7d Windows: Fix Docker hanging with named pipes and stdin
Fixes #31922

Vendor a fork of Microsoft/go-winio that has this
PR (https://github.com/Microsoft/go-winio/pull/43)
merged


Signed-off-by: David Gageot <david@gageot.net>
2017-03-21 16:13:41 +01:00
Vincent Demeester
9b6603e124 Merge pull request #31870 from aaronlehmann/vendor-swarmkit-d316a73f
[17.04] Vendor swarmkit d316a73
2017-03-21 09:46:03 +01:00
Victor Vieux
9b80fad9b4 Merge pull request #31899 from thaJeztah/17.04-cherry-pick-31894
[17.04] cherry pick 31894 - Fix compose schema id for v3.2
2017-03-16 15:36:02 -07:00
Sebastiaan van Stijn
581c601408 Merge pull request #31894 from dnephin/fix-schema-id
Fix compose schema id for v3.2
(cherry picked from commit 3eaeee07c3)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2017-03-16 21:21:49 +01:00
Vincent Demeester
f15f593751 Merge pull request #31848 from thaJeztah/17.04.0-changelog-updates
[17.04.x] deprecation and changelog updates
2017-03-16 10:08:27 +01:00
Aaron Lehmann
9bede621f1 Vendor swarmkit d316a73
Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
2017-03-15 17:17:58 -07:00
Victor Vieux
d2532c636f Merge pull request #31853 from mlaventure/update-containerd-17.04
[17.04] Update containerd to version 422e31ce907fd9c3833a38d7b8fdd023e5a76e73
2017-03-15 10:51:54 -07:00
Sebastiaan van Stijn
2387faa6de deprecation and changelog updates for 17.04
- Interacting with V1 registries is not yet disabled by default, so moving the deprecation version
- Added various entries to the changelog, and moved some to a different section

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2017-03-15 18:40:47 +01:00
Kenfe-Mickael Laventure
99280133c2 Update containerd to version 422e31ce907fd9c3833a38d7b8fdd023e5a76e73
This fixes an issue preventing containerd from starting if the state
directory didn't exist already.

Signed-off-by: Kenfe-Mickael Laventure <mickael.laventure@gmail.com>
2017-03-15 08:09:54 -07:00
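A minimal Go sketch of the shape of that fix: create the state directory up front so a missing directory cannot prevent startup. The path and helper name are illustrative, not containerd's actual code:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func ensureStateDir(dir string) error {
	// MkdirAll is a no-op when the directory already exists, so it is
	// safe to call unconditionally at startup.
	return os.MkdirAll(dir, 0o700)
}

func main() {
	state := filepath.Join(os.TempDir(), "containerd-state")
	if err := ensureStateDir(state); err != nil {
		fmt.Fprintln(os.Stderr, "failed to create state dir:", err)
		os.Exit(1)
	}
	fmt.Println("state dir ready:", state)
}
```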
Victor Vieux
b2fc918c5a Merge pull request #31811 from vieux/bump_17.04.0
bump 17.04.0-rc1
2017-03-14 20:40:32 -07:00
Victor Vieux
6d027bc8f7 bump 17.04.0-rc1
Signed-off-by: Victor Vieux <victorvieux@gmail.com>
2017-03-14 20:35:50 -07:00
9228 changed files with 618527 additions and 1550258 deletions


@@ -1,4 +1,7 @@
bundles
.gopath
vendor/pkg
.go-pkg-cache
.git
bundles/
cli/winresources/**/winres.json
cli/winresources/**/*.syso
hack/integration-cli-on-swarm/integration-cli-on-swarm

.gitattributes (vendored, 3 lines changed)

@@ -1,3 +0,0 @@
Dockerfile* linguist-language=Dockerfile
vendor.mod linguist-language=Go-Module
vendor.sum linguist-language=Go-Checksums

.github/CODEOWNERS (vendored, 15 lines changed)

@@ -1,15 +0,0 @@
# GitHub code owners
# See https://help.github.com/articles/about-codeowners/
#
# KEEP THIS FILE SORTED. Order is important. Last match takes precedence.
builder/** @tonistiigi
contrib/mkimage/** @tianon
daemon/graphdriver/devmapper/** @rhvgoyal
daemon/graphdriver/overlay/** @dmcgowan
daemon/graphdriver/overlay2/** @dmcgowan
daemon/graphdriver/windows/** @johnstep
daemon/logger/awslogs/** @samuelkarp
hack/** @tianon
plugin/** @cpuguy83
project/** @thaJeztah


@@ -10,25 +10,19 @@ information within 7 days, we cannot debug your issue and will close it. We
will, however, reopen it if you later provide the information.
For more information about reporting issues, see
https://github.com/moby/moby/blob/master/CONTRIBUTING.md#reporting-other-issues
https://github.com/docker/docker/blob/master/CONTRIBUTING.md#reporting-other-issues
---------------------------------------------------
GENERAL SUPPORT INFORMATION
---------------------------------------------------
The GitHub issue tracker is for bug reports and feature requests.
General support for **docker** can be found at the following locations:
General support can be found at the following locations:
- Docker Support Forums - https://forums.docker.com
- Slack - community.docker.com #general channel
- IRC - irc.freenode.net #docker channel
- Post a question on StackOverflow, using the Docker tag
General support for **moby** can be found at the following locations:
- Moby Project Forums - https://forums.mobyproject.org
- Slack - community.docker.com #moby-project channel
- Post a question on StackOverflow, using the Moby tag
---------------------------------------------------
BUG REPORT INFORMATION
---------------------------------------------------


@@ -1,6 +1,6 @@
<!--
Please make sure you've read and understood our contributing guidelines;
https://github.com/moby/moby/blob/master/CONTRIBUTING.md
https://github.com/docker/docker/blob/master/CONTRIBUTING.md
** Make sure all your commits include a signature generated with `git commit -s` **


@@ -1,27 +0,0 @@
name: 'Setup Runner'
description: 'Composite action to set up the GitHub Runner for jobs in the test.yml workflow'
runs:
using: composite
steps:
- run: |
sudo modprobe ip_vs
sudo modprobe ipv6
sudo modprobe ip6table_filter
sudo modprobe -r overlay
sudo modprobe overlay redirect_dir=off
shell: bash
- run: |
if [ ! -e /etc/docker/daemon.json ]; then
echo '{}' | tee /etc/docker/daemon.json >/dev/null
fi
DOCKERD_CONFIG=$(jq '.+{"experimental":true,"live-restore":true,"ipv6":true,"fixed-cidr-v6":"2001:db8:1::/64"}' /etc/docker/daemon.json)
sudo tee /etc/docker/daemon.json <<<"$DOCKERD_CONFIG" >/dev/null
sudo service docker restart
shell: bash
- run: |
./contrib/check-config.sh || true
shell: bash
- run: |
docker info
shell: bash


@@ -1,48 +0,0 @@
# reusable workflow
name: .dco
# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025
on:
workflow_call:
env:
ALPINE_VERSION: 3.16
jobs:
run:
runs-on: ubuntu-20.04
steps:
-
name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
-
name: Dump context
uses: actions/github-script@v6
with:
script: |
console.log(JSON.stringify(context, null, 2));
-
name: Get base ref
id: base-ref
uses: actions/github-script@v6
with:
result-encoding: string
script: |
if (/^refs\/pull\//.test(context.ref) && context.payload?.pull_request?.base?.ref != undefined) {
return context.payload.pull_request.base.ref;
}
return context.ref.replace(/^refs\/heads\//g, '');
-
name: Validate
run: |
docker run --rm \
-v "$(pwd):/workspace" \
-e VALIDATE_REPO \
-e VALIDATE_BRANCH \
alpine:${{ env.ALPINE_VERSION }} sh -c 'apk add --no-cache -q bash git openssh-client && git config --system --add safe.directory /workspace && cd /workspace && hack/validate/dco'
env:
VALIDATE_REPO: ${{ github.server_url }}/${{ github.repository }}.git
VALIDATE_BRANCH: ${{ steps.base-ref.outputs.result }}


@@ -1,498 +0,0 @@
# reusable workflow
name: .windows
# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025
on:
workflow_call:
inputs:
os:
required: true
type: string
send_coverage:
required: false
type: boolean
default: false
env:
GO_VERSION: 1.19.3
GOTESTLIST_VERSION: v0.2.0
TESTSTAT_VERSION: v0.1.3
WINDOWS_BASE_IMAGE: mcr.microsoft.com/windows/servercore
WINDOWS_BASE_TAG_2019: ltsc2019
WINDOWS_BASE_TAG_2022: ltsc2022
TEST_IMAGE_NAME: moby:test
TEST_CTN_NAME: moby
DOCKER_BUILDKIT: 0
ITG_CLI_MATRIX_SIZE: 6
jobs:
build:
runs-on: ${{ inputs.os }}
env:
GOPATH: ${{ github.workspace }}\go
GOBIN: ${{ github.workspace }}\go\bin
BIN_OUT: ${{ github.workspace }}\out
defaults:
run:
working-directory: ${{ env.GOPATH }}/src/github.com/docker/docker
steps:
-
name: Checkout
uses: actions/checkout@v3
with:
path: ${{ env.GOPATH }}/src/github.com/docker/docker
-
name: Env
run: |
Get-ChildItem Env: | Out-String
-
name: Init
run: |
New-Item -ItemType "directory" -Path "${{ github.workspace }}\go-build"
New-Item -ItemType "directory" -Path "${{ github.workspace }}\go\pkg\mod"
If ("${{ inputs.os }}" -eq "windows-2019") {
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2019 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
} ElseIf ("${{ inputs.os }}" -eq "windows-2022") {
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2022 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
}
-
name: Cache
uses: actions/cache@v3
with:
path: |
~\AppData\Local\go-build
~\go\pkg\mod
${{ github.workspace }}\go-build
${{ env.GOPATH }}\pkg\mod
key: ${{ inputs.os }}-${{ github.job }}-${{ hashFiles('**/vendor.sum') }}
restore-keys: |
${{ inputs.os }}-${{ github.job }}-
-
name: Docker info
run: |
docker info
-
name: Build base image
run: |
docker pull ${{ env.WINDOWS_BASE_IMAGE }}:${{ env.WINDOWS_BASE_IMAGE_TAG }}
docker tag ${{ env.WINDOWS_BASE_IMAGE }}:${{ env.WINDOWS_BASE_IMAGE_TAG }} microsoft/windowsservercore
docker build --build-arg GO_VERSION -t ${{ env.TEST_IMAGE_NAME }} -f Dockerfile.windows .
-
name: Build binaries
run: |
& docker run --name ${{ env.TEST_CTN_NAME }} -e "DOCKER_GITCOMMIT=${{ github.sha }}" `
-v "${{ github.workspace }}\go-build:C:\Users\ContainerAdministrator\AppData\Local\go-build" `
-v "${{ github.workspace }}\go\pkg\mod:C:\gopath\pkg\mod" `
${{ env.TEST_IMAGE_NAME }} hack\make.ps1 -Daemon -Client
-
name: Copy artifacts
run: |
New-Item -ItemType "directory" -Path "${{ env.BIN_OUT }}"
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\gopath\src\github.com\docker\docker\bundles\docker.exe" ${{ env.BIN_OUT }}\
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\gopath\src\github.com\docker\docker\bundles\dockerd.exe" ${{ env.BIN_OUT }}\
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\gopath\bin\gotestsum.exe" ${{ env.BIN_OUT }}\
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\containerd\bin\containerd.exe" ${{ env.BIN_OUT }}\
docker cp "${{ env.TEST_CTN_NAME }}`:c`:\containerd\bin\containerd-shim-runhcs-v1.exe" ${{ env.BIN_OUT }}\
-
name: Upload artifacts
uses: actions/upload-artifact@v3
with:
name: build-${{ inputs.os }}
path: ${{ env.BIN_OUT }}/*
if-no-files-found: error
retention-days: 2
unit-test:
runs-on: ${{ inputs.os }}
timeout-minutes: 120
env:
GOPATH: ${{ github.workspace }}\go
GOBIN: ${{ github.workspace }}\go\bin
defaults:
run:
working-directory: ${{ env.GOPATH }}/src/github.com/docker/docker
steps:
-
name: Checkout
uses: actions/checkout@v3
with:
path: ${{ env.GOPATH }}/src/github.com/docker/docker
-
name: Env
run: |
Get-ChildItem Env: | Out-String
-
name: Init
run: |
New-Item -ItemType "directory" -Path "${{ github.workspace }}\go-build"
New-Item -ItemType "directory" -Path "${{ github.workspace }}\go\pkg\mod"
New-Item -ItemType "directory" -Path "bundles"
If ("${{ inputs.os }}" -eq "windows-2019") {
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2019 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
} ElseIf ("${{ inputs.os }}" -eq "windows-2022") {
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2022 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
}
-
name: Cache
uses: actions/cache@v3
with:
path: |
~\AppData\Local\go-build
~\go\pkg\mod
${{ github.workspace }}\go-build
${{ env.GOPATH }}\pkg\mod
key: ${{ inputs.os }}-${{ github.job }}-${{ hashFiles('**/vendor.sum') }}
restore-keys: |
${{ inputs.os }}-${{ github.job }}-
-
name: Docker info
run: |
docker info
-
name: Build base image
run: |
docker pull ${{ env.WINDOWS_BASE_IMAGE }}:${{ env.WINDOWS_BASE_IMAGE_TAG }}
docker tag ${{ env.WINDOWS_BASE_IMAGE }}:${{ env.WINDOWS_BASE_IMAGE_TAG }} microsoft/windowsservercore
docker build --build-arg GO_VERSION -t ${{ env.TEST_IMAGE_NAME }} -f Dockerfile.windows .
-
name: Test
run: |
& docker run --name ${{ env.TEST_CTN_NAME }} -e "DOCKER_GITCOMMIT=${{ github.sha }}" `
-v "${{ github.workspace }}\go-build:C:\Users\ContainerAdministrator\AppData\Local\go-build" `
-v "${{ github.workspace }}\go\pkg\mod:C:\gopath\pkg\mod" `
-v "${{ env.GOPATH }}\src\github.com\docker\docker\bundles:C:\gopath\src\github.com\docker\docker\bundles" `
${{ env.TEST_IMAGE_NAME }} hack\make.ps1 -TestUnit
-
name: Send to Codecov
if: inputs.send_coverage
uses: codecov/codecov-action@v3
with:
working-directory: ${{ env.GOPATH }}\src\github.com\docker\docker
directory: bundles
env_vars: RUNNER_OS
flags: unit
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v3
with:
name: ${{ inputs.os }}-unit-reports
path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*
unit-test-report:
runs-on: ubuntu-latest
if: always()
needs:
- unit-test
steps:
-
name: Set up Go
uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
-
name: Download artifacts
uses: actions/download-artifact@v3
with:
name: ${{ inputs.os }}-unit-reports
path: /tmp/artifacts
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
teststat -markdown $(find /tmp/artifacts -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY
integration-test-prepare:
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.tests.outputs.matrix }}
steps:
-
name: Checkout
uses: actions/checkout@v3
-
name: Set up Go
uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
-
name: Install gotestlist
run:
go install github.com/crazy-max/gotestlist/cmd/gotestlist@${{ env.GOTESTLIST_VERSION }}
-
name: Create matrix
id: tests
working-directory: ./integration-cli
run: |
# Distribute integration-cli tests for the matrix in integration-test job.
# Also prepend ./... to the matrix. This is a special case to run "Test integration" step exclusively.
matrix="$(gotestlist -d ${{ env.ITG_CLI_MATRIX_SIZE }} ./...)"
matrix="$(echo "$matrix" | jq -c '. |= ["./..."] + .')"
echo "matrix=$matrix" >> $GITHUB_OUTPUT
-
name: Show matrix
run: |
echo ${{ steps.tests.outputs.matrix }}
integration-test:
runs-on: ${{ inputs.os }}
timeout-minutes: 120
needs:
- build
- integration-test-prepare
strategy:
fail-fast: false
matrix:
runtime:
- builtin
- containerd
test: ${{ fromJson(needs.integration-test-prepare.outputs.matrix) }}
env:
GOPATH: ${{ github.workspace }}\go
GOBIN: ${{ github.workspace }}\go\bin
BIN_OUT: ${{ github.workspace }}\out
defaults:
run:
working-directory: ${{ env.GOPATH }}/src/github.com/docker/docker
steps:
-
name: Checkout
uses: actions/checkout@v3
with:
path: ${{ env.GOPATH }}/src/github.com/docker/docker
-
name: Env
run: |
Get-ChildItem Env: | Out-String
-
name: Download artifacts
uses: actions/download-artifact@v3
with:
name: build-${{ inputs.os }}
path: ${{ env.BIN_OUT }}
-
name: Init
run: |
New-Item -ItemType "directory" -Path "bundles"
If ("${{ inputs.os }}" -eq "windows-2019") {
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2019 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
} ElseIf ("${{ inputs.os }}" -eq "windows-2022") {
echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2022 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
}
Write-Output "${{ env.BIN_OUT }}" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
-
# removes docker service that is currently installed on the runner. we
# could use Uninstall-Package but not yet available on Windows runners.
# more info: https://github.com/actions/virtual-environments/blob/d3a5bad25f3b4326c5666bab0011ac7f1beec95e/images/win/scripts/Installers/Install-Docker.ps1#L11
name: Removing current daemon
run: |
if (Get-Service docker -ErrorAction SilentlyContinue) {
$dockerVersion = (docker version -f "{{.Server.Version}}")
Write-Host "Current installed Docker version: $dockerVersion"
# remove service
Stop-Service -Force -Name docker
Remove-Service -Name docker
# removes event log entry. we could use "Remove-EventLog -LogName -Source docker"
# but this cmd is not available atm
$ErrorActionPreference = "SilentlyContinue"
& reg delete "HKLM\SYSTEM\CurrentControlSet\Services\EventLog\Application\docker" /f 2>&1 | Out-Null
$ErrorActionPreference = "Stop"
Write-Host "Service removed"
}
-
name: Starting containerd
if: matrix.runtime == 'containerd'
run: |
Write-Host "Generating config"
& "${{ env.BIN_OUT }}\containerd.exe" config default | Out-File "$env:TEMP\ctn.toml" -Encoding ascii
Write-Host "Creating service"
New-Item -ItemType Directory "$env:TEMP\ctn-root" -ErrorAction SilentlyContinue | Out-Null
New-Item -ItemType Directory "$env:TEMP\ctn-state" -ErrorAction SilentlyContinue | Out-Null
Start-Process -Wait "${{ env.BIN_OUT }}\containerd.exe" `
-ArgumentList "--log-level=debug", `
"--config=$env:TEMP\ctn.toml", `
"--address=\\.\pipe\containerd-containerd", `
"--root=$env:TEMP\ctn-root", `
"--state=$env:TEMP\ctn-state", `
"--log-file=$env:TEMP\ctn.log", `
"--register-service"
Write-Host "Starting service"
Start-Service -Name containerd
Start-Sleep -Seconds 5
Write-Host "Service started successfully!"
-
name: Starting test daemon
run: |
Write-Host "Creating service"
If ("${{ matrix.runtime }}" -eq "containerd") {
$runtimeArg="--containerd=\\.\pipe\containerd-containerd"
echo "DOCKER_WINDOWS_CONTAINERD_RUNTIME=1" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
}
New-Item -ItemType Directory "$env:TEMP\moby-root" -ErrorAction SilentlyContinue | Out-Null
New-Item -ItemType Directory "$env:TEMP\moby-exec" -ErrorAction SilentlyContinue | Out-Null
Start-Process -Wait -NoNewWindow "${{ env.BIN_OUT }}\dockerd" `
-ArgumentList $runtimeArg, "--debug", `
"--host=npipe:////./pipe/docker_engine", `
"--data-root=$env:TEMP\moby-root", `
"--exec-root=$env:TEMP\moby-exec", `
"--pidfile=$env:TEMP\docker.pid", `
"--register-service"
Write-Host "Starting service"
Start-Service -Name docker
Write-Host "Service started successfully!"
-
name: Waiting for test daemon to start
run: |
$tries=20
Write-Host "Waiting for the test daemon to start..."
While ($true) {
$ErrorActionPreference = "SilentlyContinue"
& "${{ env.BIN_OUT }}\docker" version
$ErrorActionPreference = "Stop"
If ($LastExitCode -eq 0) {
break
}
$tries--
If ($tries -le 0) {
Throw "Failed to get a response from the daemon"
}
Write-Host -NoNewline "."
Start-Sleep -Seconds 1
}
Write-Host "Test daemon started and replied!"
env:
DOCKER_HOST: npipe:////./pipe/docker_engine
-
name: Docker info
run: |
& "${{ env.BIN_OUT }}\docker" info
env:
DOCKER_HOST: npipe:////./pipe/docker_engine
-
name: Building contrib/busybox
run: |
& "${{ env.BIN_OUT }}\docker" build -t busybox `
--build-arg WINDOWS_BASE_IMAGE `
--build-arg WINDOWS_BASE_IMAGE_TAG `
.\contrib\busybox\
env:
DOCKER_HOST: npipe:////./pipe/docker_engine
-
name: List images
run: |
& "${{ env.BIN_OUT }}\docker" images
env:
DOCKER_HOST: npipe:////./pipe/docker_engine
-
name: Set up Go
uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
-
name: Test integration
if: matrix.test == './...'
run: |
.\hack\make.ps1 -TestIntegration
env:
DOCKER_HOST: npipe:////./pipe/docker_engine
GO111MODULE: "off"
TEST_CLIENT_BINARY: ${{ env.BIN_OUT }}\docker
-
name: Test integration-cli
if: matrix.test != './...'
run: |
.\hack\make.ps1 -TestIntegrationCli
env:
DOCKER_HOST: npipe:////./pipe/docker_engine
GO111MODULE: "off"
TEST_CLIENT_BINARY: ${{ env.BIN_OUT }}\docker
INTEGRATION_TESTRUN: ${{ matrix.test }}
-
name: Send to Codecov
if: inputs.send_coverage
uses: codecov/codecov-action@v3
with:
working-directory: ${{ env.GOPATH }}\src\github.com\docker\docker
directory: bundles
env_vars: RUNNER_OS
flags: integration,${{ matrix.runtime }}
-
name: Docker info
run: |
& "${{ env.BIN_OUT }}\docker" info
env:
DOCKER_HOST: npipe:////./pipe/docker_engine
-
name: Stop containerd
if: always() && matrix.runtime == 'containerd'
run: |
$ErrorActionPreference = "SilentlyContinue"
Stop-Service -Force -Name containerd
$ErrorActionPreference = "Stop"
-
name: Containerd logs
if: always() && matrix.runtime == 'containerd'
run: |
Copy-Item "$env:TEMP\ctn.log" -Destination ".\bundles\containerd.log"
Get-Content "$env:TEMP\ctn.log" | Out-Host
-
name: Stop daemon
if: always()
run: |
$ErrorActionPreference = "SilentlyContinue"
Stop-Service -Force -Name docker
$ErrorActionPreference = "Stop"
-
# as the daemon is registered as a service we have to check the event
# logs against the docker provider.
name: Daemon event logs
if: always()
run: |
Get-WinEvent -ea SilentlyContinue `
-FilterHashtable @{ProviderName= "docker"; LogName = "application"} |
Select-Object -Property TimeCreated, @{N='Detailed Message'; E={$_.Message}} |
Sort-Object @{Expression="TimeCreated";Descending=$false} |
Select-Object -ExpandProperty 'Detailed Message' | Tee-Object -file ".\bundles\daemon.log"
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v3
with:
name: ${{ inputs.os }}-integration-reports-${{ matrix.runtime }}
path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*
integration-test-report:
runs-on: ubuntu-latest
if: always()
needs:
- integration-test
strategy:
fail-fast: false
matrix:
runtime:
- builtin
- containerd
steps:
-
name: Set up Go
uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
-
name: Download artifacts
uses: actions/download-artifact@v3
with:
name: ${{ inputs.os }}-integration-reports-${{ matrix.runtime }}
path: /tmp/artifacts
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
teststat -markdown $(find /tmp/artifacts -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY


@@ -1,113 +0,0 @@
name: buildkit
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
workflow_dispatch:
push:
branches:
- 'master'
- '[0-9]+.[0-9]{2}'
pull_request:
env:
BUNDLES_OUTPUT: ./bundles
jobs:
validate-dco:
uses: ./.github/workflows/.dco.yml
build:
runs-on: ubuntu-20.04
needs:
- validate-dco
steps:
-
name: Checkout
uses: actions/checkout@v3
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
-
name: Build
uses: docker/bake-action@v2
with:
targets: binary
-
name: Upload artifacts
uses: actions/upload-artifact@v3
with:
name: binary
path: ${{ env.BUNDLES_OUTPUT }}
if-no-files-found: error
retention-days: 1
test:
runs-on: ubuntu-20.04
timeout-minutes: 120
needs:
- build
strategy:
fail-fast: false
matrix:
pkg:
- client
- cmd/buildctl
- solver
- frontend
- frontend/dockerfile
typ:
- integration
steps:
-
name: Checkout
uses: actions/checkout@v3
with:
path: moby
-
name: BuildKit ref
run: |
./hack/go-mod-prepare.sh
# FIXME(thaJeztah) temporarily overriding version to use for tests; remove with the next release of buildkit
# echo "BUILDKIT_REF=$(./hack/buildkit-ref)" >> $GITHUB_ENV
echo "BUILDKIT_REF=4febae4f874bd8ef52dec30e988c8fe0bc96b3b9" >> $GITHUB_ENV
working-directory: moby
-
name: Checkout BuildKit ${{ env.BUILDKIT_REF }}
uses: actions/checkout@v3
with:
repository: "moby/buildkit"
ref: ${{ env.BUILDKIT_REF }}
path: buildkit
-
name: Set up QEMU
uses: docker/setup-qemu-action@v2
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
-
name: Download binary artifacts
uses: actions/download-artifact@v3
with:
name: binary
path: ./buildkit/build/moby/
-
name: Update daemon.json
run: |
sudo rm /etc/docker/daemon.json
sudo service docker restart
docker version
docker info
-
name: Test
run: |
./hack/test ${{ matrix.typ }}
env:
CONTEXT: "."
TEST_DOCKERD: "1"
TEST_DOCKERD_BINARY: "./build/moby/binary-daemon/dockerd"
TESTPKGS: "./${{ matrix.pkg }}"
TESTFLAGS: "-v --parallel=1 --timeout=30m --run=//worker=dockerd$"
working-directory: buildkit


@@ -1,102 +0,0 @@
name: ci
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
workflow_dispatch:
push:
branches:
- 'master'
- '[0-9]+.[0-9]+'
tags:
- 'v*'
pull_request:
env:
BUNDLES_OUTPUT: ./bundles
jobs:
validate-dco:
uses: ./.github/workflows/.dco.yml
build:
runs-on: ubuntu-20.04
needs:
- validate-dco
strategy:
fail-fast: false
matrix:
target:
- binary
- dynbinary
steps:
-
name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
-
name: Build
uses: docker/bake-action@v2
with:
targets: ${{ matrix.target }}
-
name: Upload artifacts
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.target }}
path: ${{ env.BUNDLES_OUTPUT }}
if-no-files-found: error
retention-days: 7
cross:
runs-on: ubuntu-20.04
needs:
- validate-dco
strategy:
fail-fast: false
matrix:
platform:
- linux/amd64
- linux/arm/v5
- linux/arm/v6
- linux/arm/v7
- linux/arm64
- linux/ppc64le
- linux/s390x
- windows/amd64
- windows/arm64
steps:
-
name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
-
name: Prepare
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
-
name: Build
uses: docker/bake-action@v2
with:
targets: cross
env:
DOCKER_CROSSPLATFORMS: ${{ matrix.platform }}
-
name: Upload artifacts
uses: actions/upload-artifact@v3
with:
name: cross-${{ env.PLATFORM_PAIR }}
path: ${{ env.BUNDLES_OUTPUT }}
if-no-files-found: error
retention-days: 7


@@ -1,504 +0,0 @@
name: test
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
workflow_dispatch:
push:
branches:
- 'master'
- '[0-9]+.[0-9]+'
tags:
- 'v*'
pull_request:
env:
GO_VERSION: 1.19.3
GOTESTLIST_VERSION: v0.2.0
TESTSTAT_VERSION: v0.1.3
ITG_CLI_MATRIX_SIZE: 6
DOCKER_EXPERIMENTAL: 1
DOCKER_GRAPHDRIVER: overlay2
jobs:
validate-dco:
uses: ./.github/workflows/.dco.yml
build-dev:
runs-on: ubuntu-20.04
needs:
- validate-dco
strategy:
fail-fast: false
matrix:
mode:
- ""
- systemd
steps:
-
name: Prepare
run: |
if [ "${{ matrix.mode }}" = "systemd" ]; then
echo "SYSTEMD=true" >> $GITHUB_ENV
fi
-
name: Checkout
uses: actions/checkout@v3
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
-
name: Build dev image
uses: docker/bake-action@v2
with:
targets: dev
set: |
*.cache-from=type=gha,scope=dev${{ matrix.mode }}
*.cache-to=type=gha,scope=dev${{ matrix.mode }},mode=max
*.output=type=cacheonly
validate-prepare:
runs-on: ubuntu-20.04
needs:
- validate-dco
outputs:
matrix: ${{ steps.scripts.outputs.matrix }}
steps:
-
name: Checkout
uses: actions/checkout@v3
-
name: Create matrix
id: scripts
run: |
scripts=$(jq -ncR '[inputs]' <<< "$(ls -I .validate -I all -I default -I dco -I golangci-lint.yml -I yamllint.yaml -A ./hack/validate/)")
echo "matrix=$scripts" >> $GITHUB_OUTPUT
-
name: Show matrix
run: |
echo ${{ steps.scripts.outputs.matrix }}
validate:
runs-on: ubuntu-20.04
needs:
- validate-prepare
- build-dev
strategy:
fail-fast: true
matrix:
script: ${{ fromJson(needs.validate-prepare.outputs.matrix) }}
steps:
-
name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
-
name: Build dev image
uses: docker/bake-action@v2
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev
-
name: Validate
run: |
make -o build validate-${{ matrix.script }}
unit:
runs-on: ubuntu-20.04
timeout-minutes: 120
needs:
- build-dev
steps:
-
name: Checkout
uses: actions/checkout@v3
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
-
name: Build dev image
uses: docker/bake-action@v2
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev
-
name: Test
run: |
make -o build test-unit
-
name: Prepare reports
if: always()
run: |
mkdir -p bundles /tmp/reports
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C /tmp/reports
sudo chown -R $(id -u):$(id -g) /tmp/reports
tree -nh /tmp/reports
-
name: Send to Codecov
uses: codecov/codecov-action@v3
with:
directory: ./bundles
env_vars: RUNNER_OS
flags: unit
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v3
with:
name: unit-reports
path: /tmp/reports/*
unit-report:
runs-on: ubuntu-20.04
if: always()
needs:
- unit
steps:
-
name: Set up Go
uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
-
name: Download reports
uses: actions/download-artifact@v3
with:
name: unit-reports
path: /tmp/reports
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
teststat -markdown $(find /tmp/reports -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY
docker-py:
runs-on: ubuntu-20.04
timeout-minutes: 120
needs:
- build-dev
steps:
-
name: Checkout
uses: actions/checkout@v3
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
-
name: Build dev image
uses: docker/bake-action@v2
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev
-
name: Test
run: |
make -o build test-docker-py
-
name: Prepare reports
if: always()
run: |
mkdir -p bundles /tmp/reports
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C /tmp/reports
sudo chown -R $(id -u):$(id -g) /tmp/reports
tree -nh /tmp/reports
-
name: Test daemon logs
if: always()
run: |
cat bundles/test-docker-py/docker.log
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v3
with:
name: docker-py-reports
path: /tmp/reports/*
integration-flaky:
runs-on: ubuntu-20.04
timeout-minutes: 120
needs:
- build-dev
steps:
-
name: Checkout
uses: actions/checkout@v3
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
-
name: Build dev image
uses: docker/bake-action@v2
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev
-
name: Test
run: |
make -o build test-integration-flaky
env:
TEST_SKIP_INTEGRATION_CLI: 1
integration:
runs-on: ${{ matrix.os }}
timeout-minutes: 120
needs:
- build-dev
strategy:
fail-fast: false
matrix:
os:
- ubuntu-20.04
- ubuntu-22.04
mode:
- ""
- rootless
- systemd
#- rootless-systemd FIXME: https://github.com/moby/moby/issues/44084
steps:
-
name: Checkout
uses: actions/checkout@v3
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Prepare
run: |
CACHE_DEV_SCOPE=dev
if [[ "${{ matrix.mode }}" == *"rootless"* ]]; then
echo "DOCKER_ROOTLESS=1" >> $GITHUB_ENV
fi
if [[ "${{ matrix.mode }}" == *"systemd"* ]]; then
echo "SYSTEMD=true" >> $GITHUB_ENV
CACHE_DEV_SCOPE="${CACHE_DEV_SCOPE}systemd"
fi
echo "CACHE_DEV_SCOPE=${CACHE_DEV_SCOPE}" >> $GITHUB_ENV
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
-
name: Build dev image
uses: docker/bake-action@v2
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=${{ env.CACHE_DEV_SCOPE }}
-
name: Test
run: |
make -o build test-integration
env:
TEST_SKIP_INTEGRATION_CLI: 1
TESTCOVERAGE: 1
-
name: Prepare reports
if: always()
run: |
reportsPath="/tmp/reports/${{ matrix.os }}"
if [ -n "${{ matrix.mode }}" ]; then
reportsPath="$reportsPath-${{ matrix.mode }}"
fi
mkdir -p bundles $reportsPath
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C $reportsPath
sudo chown -R $(id -u):$(id -g) $reportsPath
tree -nh $reportsPath
-
name: Send to Codecov
uses: codecov/codecov-action@v3
with:
directory: ./bundles/test-integration
env_vars: RUNNER_OS
flags: integration,${{ matrix.mode }}
-
name: Test daemon logs
if: always()
run: |
cat bundles/test-integration/docker.log
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v3
with:
name: integration-reports
path: /tmp/reports/*
integration-report:
runs-on: ubuntu-20.04
if: always()
needs:
- integration
steps:
-
name: Set up Go
uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
-
name: Download reports
uses: actions/download-artifact@v3
with:
name: integration-reports
path: /tmp/reports
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
teststat -markdown $(find /tmp/reports -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY
integration-cli-prepare:
runs-on: ubuntu-20.04
needs:
- validate-dco
outputs:
matrix: ${{ steps.tests.outputs.matrix }}
steps:
-
name: Checkout
uses: actions/checkout@v3
-
name: Set up Go
uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
-
name: Install gotestlist
run:
go install github.com/crazy-max/gotestlist/cmd/gotestlist@${{ env.GOTESTLIST_VERSION }}
-
name: Create matrix
id: tests
working-directory: ./integration-cli
run: |
# Distribute integration-cli tests for the matrix in integration-test job.
# Also prepend ./... to the matrix. This is a special case to run "Test integration" step exclusively.
matrix="$(gotestlist -d ${{ env.ITG_CLI_MATRIX_SIZE }} ./...)"
matrix="$(echo "$matrix" | jq -c '. |= ["./..."] + .')"
echo "matrix=$matrix" >> $GITHUB_OUTPUT
-
name: Show matrix
run: |
echo ${{ steps.tests.outputs.matrix }}
integration-cli:
runs-on: ubuntu-20.04
timeout-minutes: 120
needs:
- build-dev
- integration-cli-prepare
strategy:
fail-fast: false
matrix:
test: ${{ fromJson(needs.integration-cli-prepare.outputs.matrix) }}
steps:
-
name: Checkout
uses: actions/checkout@v3
-
name: Set up runner
uses: ./.github/actions/setup-runner
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
-
name: Build dev image
uses: docker/bake-action@v2
with:
targets: dev
set: |
dev.cache-from=type=gha,scope=dev
-
name: Test
run: |
make -o build test-integration
env:
TEST_SKIP_INTEGRATION: 1
TESTCOVERAGE: 1
TESTFLAGS: "-test.run (${{ matrix.test }})/"
-
name: Prepare reports
if: always()
run: |
reportsPath=/tmp/reports/$(echo -n "${{ matrix.test }}" | sha256sum | cut -d " " -f 1)
mkdir -p bundles $reportsPath
echo "${{ matrix.test }}" | tr -s '|' '\n' | tee -a "$reportsPath/tests.txt"
find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
tar -xzf /tmp/reports.tar.gz -C $reportsPath
sudo chown -R $(id -u):$(id -g) $reportsPath
tree -nh $reportsPath
-
name: Send to Codecov
uses: codecov/codecov-action@v3
with:
directory: ./bundles/test-integration
env_vars: RUNNER_OS
flags: integration-cli
-
name: Test daemon logs
if: always()
run: |
cat bundles/test-integration/docker.log
-
name: Upload reports
if: always()
uses: actions/upload-artifact@v3
with:
name: integration-cli-reports
path: /tmp/reports/*
integration-cli-report:
runs-on: ubuntu-20.04
if: always()
needs:
- integration-cli
steps:
-
name: Set up Go
uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
-
name: Download reports
uses: actions/download-artifact@v3
with:
name: integration-cli-reports
path: /tmp/reports
-
name: Install teststat
run: |
go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
-
name: Create summary
run: |
teststat -markdown $(find /tmp/reports -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY


@@ -1,22 +0,0 @@
name: windows-2019
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
schedule:
- cron: '0 10 * * *'
workflow_dispatch:
jobs:
validate-dco:
uses: ./.github/workflows/.dco.yml
run:
needs:
- validate-dco
uses: ./.github/workflows/.windows.yml
with:
os: windows-2019
send_coverage: false


@@ -1,25 +0,0 @@
name: windows-2022
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
workflow_dispatch:
push:
branches:
- 'master'
- '[0-9]+.[0-9]+'
pull_request:
jobs:
validate-dco:
uses: ./.github/workflows/.dco.yml
run:
needs:
- validate-dco
uses: ./.github/workflows/.windows.yml
with:
os: windows-2022
send_coverage: true

.gitignore (vendored, 52 lines changed)

@@ -1,30 +1,34 @@
# If you want to ignore files created by your editor/tools, please consider a
# [global .gitignore](https://help.github.com/articles/ignoring-files).
*~
*.bak
# Docker project generated files to ignore
# if you want to ignore files created by your editor/tools,
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
*.exe
*.exe~
*.orig
*.test
.*.swp
.DS_Store
thumbs.db
# local repository customization
.envrc
# a .bashrc may be added to customize the build environment
.bashrc
.editorconfig
# top-level go.mod is not meant to be checked in
/go.mod
# build artifacts
.gopath/
.go-pkg-cache/
autogen/
bundles/
cli/winresources/*/*.syso
cli/winresources/*/winres.json
contrib/builder/rpm/*/changelog
# ci artifacts
*.exe
*.gz
go-test-report.json
junit-report.xml
profile.out
test.main
cmd/dockerd/dockerd
cmd/docker/docker
dockerversion/version_autogen.go
dockerversion/version_autogen_unix.go
docs/AWS_S3_BUCKET
docs/GITCOMMIT
docs/GIT_BRANCH
docs/VERSION
docs/_build
docs/_static
docs/_templates
docs/changed-files
# generated by man/md2man-all.sh
man/man1
man/man5
man/man8
vendor/pkg/
hack/integration-cli-on-swarm/integration-cli-on-swarm

.mailmap (1023 lines changed): file diff suppressed because it is too large

AUTHORS (922 lines changed): file diff suppressed because it is too large

CHANGELOG.md (normal file, 3469 lines changed): file diff suppressed because it is too large


@@ -1,14 +1,14 @@
# Contribute to the Moby Project
# Contributing to Docker
Want to hack on the Moby Project? Awesome! We have a contributor's guide that explains
[setting up a development environment and the contribution
process](docs/contributing/).
Want to hack on Docker? Awesome! We have a contributor's guide that explains
[setting up a Docker development environment and the contribution
process](https://docs.docker.com/opensource/project/who-written-for/).
[![Contributors guide](docs/static_files/contributors.png)](https://docs.docker.com/opensource/project/who-written-for/)
This page contains information about reporting issues as well as some tips and
guidelines useful to experienced open source contributors. Finally, make sure
you read our [community guidelines](#moby-community-guidelines) before you
you read our [community guidelines](#docker-community-guidelines) before you
start participating.
## Topics
@@ -17,20 +17,20 @@ start participating.
* [Design and Cleanup Proposals](#design-and-cleanup-proposals)
* [Reporting Issues](#reporting-other-issues)
* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines)
* [Community Guidelines](#moby-community-guidelines)
* [Community Guidelines](#docker-community-guidelines)
## Reporting security issues
The Moby maintainers take security seriously. If you discover a security
The Docker maintainers take security seriously. If you discover a security
issue, please bring it to their attention right away!
Please **DO NOT** file a public issue, instead send your report privately to
[security@docker.com](mailto:security@docker.com).
Security reports are greatly appreciated and we will publicly thank you for it,
although we keep your name confidential if you request it. We also like to send
gifts&mdash;if you're into schwag, make sure to let us know. We currently do not
offer a paid security bounty program, but are not ruling it out in the future.
Security reports are greatly appreciated and we will publicly thank you for it.
We also like to send gifts&mdash;if you're into Docker schwag, make sure to let
us know. We currently do not offer a paid security bounty program, but are not
ruling it out in the future.
## Reporting other issues
@@ -39,7 +39,7 @@ A great way to contribute to the project is to send a detailed report when you
encounter an issue. We always appreciate a well-written, thorough bug report,
and will thank you for it!
Check that [our issue database](https://github.com/moby/moby/issues)
Check that [our issue database](https://github.com/docker/docker/issues)
doesn't already include that problem or suggestion before submitting an issue.
If you find a match, you can use the "subscribe" button to get notified on
updates. Do *not* leave random "+1" or "I have this too" comments, as they
@@ -66,7 +66,7 @@ This section gives the experienced contributor some tips and guidelines.
Not sure if that typo is worth a pull request? Found a bug and know how to fix
it? Do it! We will appreciate it. Any significant improvement should be
documented as [a GitHub issue](https://github.com/moby/moby/issues) before
documented as [a GitHub issue](https://github.com/docker/docker/issues) before
anybody starts working on it.
We are always thrilled to receive pull requests. We do our best to process them
@@ -83,7 +83,11 @@ contributions, see [the advanced contribution
section](https://docs.docker.com/opensource/workflow/advanced-contributing/) in
the contributors guide.
### Connect with other Moby Project contributors
We try hard to keep Docker lean and focused. Docker can't do everything for
everybody. This means that we might decide against incorporating a new feature.
However, there might be a way to implement that feature *on top of* Docker.
### Talking to other Docker users and contributors
<table class="tg">
<col width="45%">
@@ -92,28 +96,52 @@ the contributors guide.
<td>Forums</td>
<td>
A public forum for users to discuss questions and explore current design patterns and
best practices about all the Moby projects. To participate, log in with your Github
account or create an account at <a href="https://forums.mobyproject.org" target="_blank">https://forums.mobyproject.org</a>.
best practices about Docker and related projects in the Docker Ecosystem. To participate,
just log in with your Docker Hub account on <a href="https://forums.docker.com" target="_blank">https://forums.docker.com</a>.
</td>
</tr>
<tr>
<td>Slack</td>
<td>Internet&nbsp;Relay&nbsp;Chat&nbsp;(IRC)</td>
<td>
<p>
Register for the Docker Community Slack at
<a href="https://dockr.ly/slack" target="_blank">https://dockr.ly/slack</a>.
We use the #moby-project channel for general discussion, and there are separate channels for other Moby projects such as #containerd.
IRC a direct line to our most knowledgeable Docker users; we have
both the <code>#docker</code> and <code>#docker-dev</code> group on
<strong>irc.freenode.net</strong>.
IRC is a rich chat protocol but it can overwhelm new users. You can search
<a href="https://botbot.me/freenode/docker/#" target="_blank">our chat archives</a>.
</p>
<p>
Read our <a href="https://docs.docker.com/opensource/get-help/#irc-quickstart" target="_blank">IRC quickstart guide</a>
for an easy way to get started.
</p>
</td>
</tr>
<tr>
<td>Google Group</td>
<td>
The <a href="https://groups.google.com/forum/#!forum/docker-dev" target="_blank">docker-dev</a>
group is for contributors and other people contributing to the Docker project.
You can join them without a google account by sending an email to
<a href="mailto:docker-dev+subscribe@googlegroups.com">docker-dev+subscribe@googlegroups.com</a>.
After receiving the join-request message, you can simply reply to that to confirm the subscription.
</td>
</tr>
<tr>
<td>Twitter</td>
<td>
You can follow <a href="https://twitter.com/moby/" target="_blank">Moby Project Twitter feed</a>
You can follow <a href="https://twitter.com/docker/" target="_blank">Docker's Twitter feed</a>
to get updates on our products. You can also tweet us questions or just
share blogs or stories.
</td>
</tr>
<tr>
<td>Stack Overflow</td>
<td>
Stack Overflow has over 17000 Docker questions listed. We regularly
monitor <a href="https://stackoverflow.com/search?tab=newest&q=docker" target="_blank">Docker questions</a>
and so do many other knowledgeable Docker users.
</td>
</tr>
</table>
@@ -127,11 +155,10 @@ Fork the repository and make changes on your fork in a feature branch:
your intentions, and name it XXXX-something where XXXX is the number of the
issue.
Submit tests for your changes. See [TESTING.md](./TESTING.md) for details.
If your changes need integration tests, write them against the API. The `cli`
integration tests are slowly either migrated to API tests or moved away as unit
tests in `docker/cli` and end-to-end tests for Docker.
Submit unit tests for your changes. Go has a great test framework built in; use
it! Take a look at existing tests for inspiration. [Run the full test
suite](https://docs.docker.com/opensource/project/test-and-docs/) on your branch before
submitting a pull request.
Update the documentation when creating or modifying features. Test your
documentation changes for clarity, concision, and correctness, as well as a
@@ -224,9 +251,10 @@ calling it in another file constitute a single logical unit of work. The very
high majority of submissions should have a single commit, so if in doubt: squash
down to one.
After every commit, [make sure the test suite passes](./TESTING.md). Include
documentation changes in the same pull request so that a revert would remove
all traces of the feature or fix.
After every commit, [make sure the test suite passes]
(https://docs.docker.com/opensource/project/test-and-docs/). Include documentation
changes in the same pull request so that a revert would remove all traces of
the feature or fix.
Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that
close an issue. Including references automatically closes the issue on a merge.
@@ -238,11 +266,15 @@ Please see the [Coding Style](#coding-style) for further guidelines.
### Merge approval
Moby maintainers use LGTM (Looks Good To Me) in comments on the code review to
indicate acceptance, or use the Github review approval feature.
Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to
indicate acceptance.
For an explanation of the review and approval process see the
[REVIEWING](project/REVIEWING.md) page.
A change requires LGTMs from an absolute majority of the maintainers of each
component affected. For example, if a change affects `docs/` and `registry/`, it
needs an absolute majority from the maintainers of `docs/` AND, separately, an
absolute majority of the maintainers of `registry/`.
For more details, see the [MAINTAINERS](MAINTAINERS) page.
### Sign your work
@@ -302,46 +334,17 @@ commit automatically with `git commit -s`.
### How can I become a maintainer?
The procedures for adding new maintainers are explained in the
[/project/GOVERNANCE.md](/project/GOVERNANCE.md)
file in this repository.
global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS)
file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/)
repository.
Don't forget: being a maintainer is a time investment. Make sure you
will have time to make yourself available. You don't have to be a
maintainer to make a difference on the project!
### Manage issues and pull requests using the Derek bot
## Docker community guidelines
If you want to help label, assign, close or reopen issues or pull requests
without commit rights, ask a maintainer to add your Github handle to the
`.DEREK.yml` file. [Derek](https://github.com/alexellis/derek) is a bot that extends
Github's user permissions to help non-committers to manage issues and pull requests simply by commenting.
For example:
* Labels
```
Derek add label: kind/question
Derek remove label: status/claimed
```
* Assign work
```
Derek assign: username
Derek unassign: me
```
* Manage issues and PRs
```
Derek close
Derek reopen
```
## Moby community guidelines
We want to keep the Moby community awesome, growing and collaborative. We need
We want to keep the Docker community awesome, growing and collaborative. We need
your help to keep it that way. To help with this we've come up with some general
guidelines for the community as a whole:
@@ -369,11 +372,6 @@ guidelines for the community as a whole:
used to ping maintainers to review a pull request, a proposal or an
issue.
The open source governance for this repository is handled via the [Moby Technical Steering Committee (TSC)](https://github.com/moby/tsc)
charter. For any concerns with the community process regarding technical contributions,
please contact the TSC. More information on project governance is available in
our [project/GOVERNANCE.md](/project/GOVERNANCE.md) document.
### Guideline violations — 3 strikes method
The point of this section is not to find opportunities to punish people, but we


@@ -1,465 +1,252 @@
# syntax=docker/dockerfile:1
ARG CROSS="false"
ARG SYSTEMD="false"
ARG GO_VERSION=1.19.3
ARG DEBIAN_FRONTEND=noninteractive
ARG VPNKIT_VERSION=0.5.0
ARG BASE_DEBIAN_DISTRO="bullseye"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
FROM ${GOLANG_IMAGE} AS base
RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
ARG APT_MIRROR
RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \
&& sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list
ENV GO111MODULE=off
FROM base AS criu
ARG DEBIAN_FRONTEND
ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_11/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc
RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \
echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_11/ /' > /etc/apt/sources.list.d/criu.list \
&& apt-get update \
&& apt-get install -y --no-install-recommends criu \
&& install -D /usr/sbin/criu /build/criu
FROM base AS registry
WORKDIR /go/src/github.com/docker/distribution
# REGISTRY_VERSION specifies the version of the registry to build and install
# from the https://github.com/docker/distribution repository. This version of
# the registry is used to test both schema 1 and schema 2 manifests. Generally,
# the version specified here should match a current release.
ARG REGISTRY_VERSION=v2.3.0
# REGISTRY_VERSION_SCHEMA1 specifies the version of the registry to build and
# install from the https://github.com/docker/distribution repository. This is
# an older (pre v2.3.0) version of the registry that only supports schema1
# manifests. This version of the registry is not working on arm64, so installation
# is skipped on that architecture.
ARG REGISTRY_VERSION_SCHEMA1=v2.1.0
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=tmpfs,target=/go/src/ \
set -x \
&& git clone https://github.com/docker/distribution.git . \
&& git checkout -q "$REGISTRY_VERSION" \
&& GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \
&& case $(dpkg --print-architecture) in \
amd64|armhf|ppc64*|s390x) \
git checkout -q "$REGISTRY_VERSION_SCHEMA1"; \
GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \
go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \
;; \
esac
FROM base AS swagger
WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger
# GO_SWAGGER_COMMIT specifies the version of the go-swagger binary to build and
# install. Go-swagger is used in CI for validating swagger.yaml in hack/validate/swagger-gen
# This file describes the standard way to build Docker, using docker
#
# Currently uses a fork from https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix,
# TODO: move to under moby/ or fix upstream go-swagger to work for us.
ENV GO_SWAGGER_COMMIT c56166c036004ba7a3a321e5951ba472b9ae298c
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=tmpfs,target=/go/src/ \
set -x \
&& git clone https://github.com/kolyshkin/go-swagger.git . \
&& git checkout -q "$GO_SWAGGER_COMMIT" \
&& go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger
# frozen-images
# See also frozenImages in "testutil/environment/protect.go" (which needs to
# be updated when adding images to this list)
FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images
ARG DEBIAN_FRONTEND
RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
curl \
jq
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /
ARG TARGETARCH
ARG TARGETVARIANT
RUN /download-frozen-image-v2.sh /build \
busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \
busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \
debian:bullseye-slim@sha256:dacf278785a4daa9de07596ec739dbc07131e189942772210709c5c0777e8437 \
hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \
arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1
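The download script's completion message suggests how to load the result into a daemon; a minimal sketch, assuming the images end up in /docker-frozen-images as in the dev stage further down:

    # pipe the downloaded layout into "docker load"
    tar -cC /docker-frozen-images . | docker load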
FROM base AS cross-false
FROM --platform=linux/amd64 base AS cross-true
ARG DEBIAN_FRONTEND
RUN dpkg --add-architecture arm64
RUN dpkg --add-architecture armel
RUN dpkg --add-architecture armhf
RUN dpkg --add-architecture ppc64el
RUN dpkg --add-architecture s390x
RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
crossbuild-essential-arm64 \
crossbuild-essential-armel \
crossbuild-essential-armhf \
crossbuild-essential-ppc64el \
crossbuild-essential-s390x
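The cross toolchains above are only pulled in when the CROSS build-arg selects the cross-true stage; a hedged example invocation, with an arbitrary image tag:

    # enable cross-compilation toolchains in the dev image (sketch)
    docker build --build-arg CROSS=true -t docker-dev:cross .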
FROM cross-${CROSS} AS dev-base
FROM dev-base AS runtime-dev-cross-false
ARG DEBIAN_FRONTEND
RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
binutils-mingw-w64 \
g++-mingw-w64-x86-64 \
libapparmor-dev \
libbtrfs-dev \
libdevmapper-dev \
libseccomp-dev \
libsystemd-dev \
libudev-dev
FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true
ARG DEBIAN_FRONTEND
# These crossbuild packages rely on gcc-<arch>, which cannot be installed
# on non-amd64 systems, so other architectures cannot cross-build for amd64.
RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
libapparmor-dev:arm64 \
libapparmor-dev:armel \
libapparmor-dev:armhf \
libapparmor-dev:ppc64el \
libapparmor-dev:s390x \
libseccomp-dev:arm64 \
libseccomp-dev:armel \
libseccomp-dev:armhf \
libseccomp-dev:ppc64el \
libseccomp-dev:s390x
FROM runtime-dev-cross-${CROSS} AS runtime-dev
FROM base AS delve
# DELVE_VERSION specifies the version of the Delve debugger binary
# from the https://github.com/go-delve/delve repository.
# It can be used to run Docker with the possibility of
# attaching a debugger to it.
# Usage:
#
ARG DELVE_VERSION=v1.8.1
# Delve on Linux is currently only supported on amd64 and arm64;
# https://github.com/go-delve/delve/blob/v1.8.1/pkg/proc/native/support_sentinel.go#L1-L6
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
case $(dpkg --print-architecture) in \
amd64|arm64) \
GOBIN=/build/ GO111MODULE=on go install "github.com/go-delve/delve/cmd/dlv@${DELVE_VERSION}" \
&& /build/dlv --help \
;; \
*) \
mkdir -p /build/ \
;; \
esac
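A rough sketch of how the resulting dlv binary might be used; the process name and flags here are illustrative, not prescribed by this repository:

    # attach Delve to a running daemon inside the dev container (illustrative)
    dlv attach "$(pidof dockerd)" --listen=:2345 --headless --api-version=2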
FROM base AS tomll
# This file describes the standard way to build Docker, using docker
#
# Usage:
#
# # Assemble the full dev environment. This is slow the first time.
# docker build -t docker .
#
# # Mount your source in an interactive container for quick testing:
# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
#
# # Run the test suite:
# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py
#
# # Publish a release:
# docker run --privileged \
# -e AWS_S3_BUCKET=baz \
# -e AWS_ACCESS_KEY=foo \
# -e AWS_SECRET_KEY=bar \
# -e GPG_PASSPHRASE=gloubiboulga \
# docker hack/release.sh
#
# Note: AppArmor used to mess with privileged mode, but this is no longer
# the case. Therefore, you don't have to disable it anymore.
#
# GOTOML_VERSION specifies the version of the tomll binary to build and install
# from the https://github.com/pelletier/go-toml repository. This binary is used
# in CI in the hack/validate/toml script.
# When updating this version, consider updating the github.com/pelletier/go-toml
# dependency in vendor.mod accordingly.
ARG GOTOML_VERSION=v1.8.1
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
GOBIN=/build/ GO111MODULE=on go install "github.com/pelletier/go-toml/cmd/tomll@${GOTOML_VERSION}" \
&& /build/tomll --help
FROM base AS gowinres
# GOWINRES_VERSION defines go-winres tool version
ARG GOWINRES_VERSION=v0.3.0
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
GOBIN=/build/ GO111MODULE=on go install "github.com/tc-hib/go-winres@${GOWINRES_VERSION}" \
&& /build/go-winres --help
FROM debian:jessie
FROM dev-base AS containerd
ARG DEBIAN_FRONTEND
RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
libbtrfs-dev
ARG CONTAINERD_VERSION
COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/containerd.installer /
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
PREFIX=/build /install.sh containerd
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
FROM base AS golangci_lint
# FIXME: when updating golangci-lint, remove the temporary "nolint" in https://github.com/moby/moby/blob/7860686a8df15eea9def9e6189c6f9eca031bb6f/libnetwork/networkdb/cluster.go#L246
ARG GOLANGCI_LINT_VERSION=v1.49.0
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
GOBIN=/build/ GO111MODULE=on go install "github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCI_LINT_VERSION}" \
&& /build/golangci-lint --version
# Add zfs ppa
COPY keys/launchpad-ppa-zfs.asc /go/src/github.com/docker/docker/keys/
RUN apt-key add /go/src/github.com/docker/docker/keys/launchpad-ppa-zfs.asc
RUN echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu trusty main > /etc/apt/sources.list.d/zfs.list
FROM base AS gotestsum
ARG GOTESTSUM_VERSION=v1.8.2
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
GOBIN=/build/ GO111MODULE=on go install "gotest.tools/gotestsum@${GOTESTSUM_VERSION}" \
&& /build/gotestsum --version
# Packaged dependencies
RUN apt-get update && apt-get install -y \
apparmor \
apt-utils \
aufs-tools \
automake \
bash-completion \
binutils-mingw-w64 \
bsdmainutils \
btrfs-tools \
build-essential \
clang \
cmake \
createrepo \
curl \
dpkg-sig \
gcc-mingw-w64 \
git \
iptables \
jq \
less \
libapparmor-dev \
libcap-dev \
libltdl-dev \
libnl-3-dev \
libprotobuf-c0-dev \
libprotobuf-dev \
libsystemd-journal-dev \
libtool \
libzfs-dev \
mercurial \
net-tools \
pkg-config \
protobuf-compiler \
protobuf-c-compiler \
python-dev \
python-mock \
python-pip \
python-websocket \
tar \
ubuntu-zfs \
vim \
vim-common \
xfsprogs \
zip \
--no-install-recommends \
&& pip install awscli==1.10.15
# Get lvm2 source for compiling statically
ENV LVM2_VERSION 2.02.103
RUN mkdir -p /usr/local/lvm2 \
&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
| tar -xzC /usr/local/lvm2 --strip-components=1
# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
FROM base AS shfmt
ARG SHFMT_VERSION=v3.0.2
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
GOBIN=/build/ GO111MODULE=on go install "mvdan.cc/sh/v3/cmd/shfmt@${SHFMT_VERSION}" \
&& /build/shfmt --version
# Compile and install lvm2
RUN cd /usr/local/lvm2 \
&& ./configure \
--build="$(gcc -print-multiarch)" \
--enable-static_link \
&& make device-mapper \
&& make install_device-mapper
# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
FROM dev-base AS dockercli
ARG DOCKERCLI_CHANNEL
ARG DOCKERCLI_VERSION
COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/dockercli.installer /
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
PREFIX=/build /install.sh dockercli
# Configure the container for OSX cross compilation
ENV OSX_SDK MacOSX10.11.sdk
ENV OSX_CROSS_COMMIT a9317c18a3a457ca0a657f08cc4d0d43c6cf8953
RUN set -x \
&& export OSXCROSS_PATH="/osxcross" \
&& git clone https://github.com/tpoechtrager/osxcross.git $OSXCROSS_PATH \
&& ( cd $OSXCROSS_PATH && git checkout -q $OSX_CROSS_COMMIT) \
&& curl -sSL https://s3.dockerproject.org/darwin/v2/${OSX_SDK}.tar.xz -o "${OSXCROSS_PATH}/tarballs/${OSX_SDK}.tar.xz" \
&& UNATTENDED=yes OSX_VERSION_MIN=10.6 ${OSXCROSS_PATH}/build.sh
ENV PATH /osxcross/target/bin:$PATH
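With the osxcross toolchain on PATH, a macOS client binary could in principle be cross-compiled along these lines; o64-clang is the compiler name osxcross installs, and the package path is an assumption about this tree:

    # illustrative cross-compile of the CLI for darwin/amd64
    CC=o64-clang CGO_ENABLED=1 GOOS=darwin GOARCH=amd64 go build ./cmd/docker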
FROM runtime-dev AS runc
ARG RUNC_VERSION
ARG RUNC_BUILDTAGS
COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/runc.installer /
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
PREFIX=/build /install.sh runc
# Install seccomp: the version shipped upstream is too old
ENV SECCOMP_VERSION 2.3.2
RUN set -x \
&& export SECCOMP_PATH="$(mktemp -d)" \
&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
&& ( \
cd "$SECCOMP_PATH" \
&& ./configure --prefix=/usr/local \
&& make \
&& make install \
&& ldconfig \
) \
&& rm -rf "$SECCOMP_PATH"
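A hedged sanity check that the freshly built library is the one being picked up; the version printed should match SECCOMP_VERSION above:

    # confirm the installed libseccomp version (illustrative)
    PKG_CONFIG_PATH=/usr/local/lib/pkgconfig pkg-config --modversion libseccomp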
FROM dev-base AS tini
ARG DEBIAN_FRONTEND
ARG TINI_VERSION
RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
cmake \
vim-common
COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/tini.installer /
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
PREFIX=/build /install.sh tini
# Install Go
# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines
# will need updating, to avoid errors. Ping #docker-maintainers on IRC
# with a heads-up.
ENV GO_VERSION 1.7.5
RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \
| tar -xzC /usr/local
FROM dev-base AS rootlesskit
ARG ROOTLESSKIT_VERSION
ARG PREFIX=/build
COPY /hack/dockerfile/install/install.sh /hack/dockerfile/install/rootlesskit.installer /
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
/install.sh rootlesskit \
&& "${PREFIX}"/rootlesskit --version \
&& "${PREFIX}"/rootlesskit-docker-proxy --help
COPY ./contrib/dockerd-rootless.sh /build
COPY ./contrib/dockerd-rootless-setuptool.sh /build
ENV PATH /go/bin:/usr/local/go/bin:$PATH
ENV GOPATH /go
FROM base AS crun
ARG CRUN_VERSION=1.4.5
RUN --mount=type=cache,sharing=locked,id=moby-crun-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-crun-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
autoconf \
automake \
build-essential \
libcap-dev \
libprotobuf-c-dev \
libseccomp-dev \
libsystemd-dev \
libtool \
libudev-dev \
libyajl-dev \
python3 \
;
RUN --mount=type=tmpfs,target=/tmp/crun-build \
git clone https://github.com/containers/crun.git /tmp/crun-build && \
cd /tmp/crun-build && \
git checkout -q "${CRUN_VERSION}" && \
./autogen.sh && \
./configure --bindir=/build && \
make -j install
# Compile Go for cross compilation
ENV DOCKER_CROSSPLATFORMS \
linux/386 linux/arm \
darwin/amd64 \
freebsd/amd64 freebsd/386 freebsd/arm \
windows/amd64 windows/386 \
solaris/amd64
# vpnkit
# use a dummy scratch stage to keep the build from failing on unsupported platforms
FROM scratch AS vpnkit-windows
FROM scratch AS vpnkit-linux-386
FROM scratch AS vpnkit-linux-arm
FROM scratch AS vpnkit-linux-ppc64le
FROM scratch AS vpnkit-linux-riscv64
FROM scratch AS vpnkit-linux-s390x
FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-linux-amd64
FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-linux-arm64
FROM vpnkit-linux-${TARGETARCH} AS vpnkit-linux
FROM vpnkit-${TARGETOS} AS vpnkit
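TARGETOS and TARGETARCH are build-args that BuildKit populates automatically from --platform, which is what steers the stage aliasing above; a hedged example, with an arbitrary tag:

    # pick the arm64 vpnkit variant by building for that platform (sketch)
    docker buildx build --platform linux/arm64 --target dev -t docker-dev:arm64 .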
# Dependency for golint
ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3
RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \
&& (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT)
# TODO: some of this is only really needed for testing; it would be nice to split it up
FROM runtime-dev AS dev-systemd-false
ARG DEBIAN_FRONTEND
# Grab Go's lint tool
ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456
RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \
&& (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \
&& go install -v github.com/golang/lint/golint
# Install CRIU for checkpoint/restore support
ENV CRIU_VERSION 2.9
RUN mkdir -p /usr/src/criu \
&& curl -sSL https://github.com/xemul/criu/archive/v${CRIU_VERSION}.tar.gz | tar -v -C /usr/src/criu/ -xz --strip-components=1 \
&& cd /usr/src/criu \
&& make \
&& make install-criu
# Install two versions of the registry. The first is an older version that
# only supports schema1 manifests. The second is a newer version that supports
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \
&& rm -rf "$GOPATH"
# Install notary and notary-server
ENV NOTARY_VERSION v0.5.0
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
&& rm -rf "$GOPATH"
# Get the "docker-py" source so we can run their integration tests
ENV DOCKER_PY_COMMIT 4a08d04aef0595322e1b5ac7c52f28a931da85a5
RUN git clone https://github.com/docker/docker-py.git /docker-py \
&& cd /docker-py \
&& git checkout -q $DOCKER_PY_COMMIT \
# To run the integration tests, docker-pycreds is required:
# before the integration tests run, conftest.py is loaded,
# which loads auth.py, which in turn imports the
# docker-pycreds module.
&& pip install docker-pycreds==0.2.1 \
&& pip install -r test-requirements.txt
# Install yamllint for validating swagger.yaml
RUN pip install yamllint==1.5.0
# Install go-swagger for validating swagger.yaml
ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb
RUN git clone https://github.com/go-swagger/go-swagger.git /go/src/github.com/go-swagger/go-swagger \
&& (cd /go/src/github.com/go-swagger/go-swagger && git checkout -q $GO_SWAGGER_COMMIT) \
&& go install -v github.com/go-swagger/go-swagger/cmd/swagger
# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'
# Add an unprivileged user to be used for tests which need it
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser \
&& mkdir -p /home/unprivilegeduser/.local/share/docker \
&& chown -R unprivilegeduser /home/unprivilegeduser
# Let us use a .bashrc file
RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc
# Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH
RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc
RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker
RUN ldconfig
# This should only install packages that are specifically needed for the dev environment and nothing else
# Do you really need to add another package here? Can it be done in a different build stage?
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
apparmor \
bash-completion \
bzip2 \
inetutils-ping \
iproute2 \
iptables \
jq \
libcap2-bin \
libnet1 \
libnl-3-200 \
libprotobuf-c1 \
libyajl2 \
net-tools \
patch \
pigz \
python3-pip \
python3-setuptools \
python3-wheel \
sudo \
thin-provisioning-tools \
uidmap \
vim \
vim-common \
xfsprogs \
xz-utils \
zip \
zstd
RUN useradd --create-home --gid docker unprivilegeduser
# Switch to use iptables instead of nftables (to match the CI hosts)
# TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824)
RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \
&& update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \
&& update-alternatives --set arptables /usr/sbin/arptables-legacy || true
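A quick way to verify which backend ended up active; the exact version string will vary:

    # "(legacy)" in the output indicates the switch took effect
    iptables --version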
ARG YAMLLINT_VERSION=1.27.1
RUN pip3 install yamllint==${YAMLLINT_VERSION}
COPY --from=dockercli /build/ /usr/local/cli
COPY --from=frozen-images /build/ /docker-frozen-images
COPY --from=swagger /build/ /usr/local/bin/
COPY --from=delve /build/ /usr/local/bin/
COPY --from=tomll /build/ /usr/local/bin/
COPY --from=gowinres /build/ /usr/local/bin/
COPY --from=tini /build/ /usr/local/bin/
COPY --from=registry /build/ /usr/local/bin/
COPY --from=criu /build/ /usr/local/bin/
COPY --from=gotestsum /build/ /usr/local/bin/
COPY --from=golangci_lint /build/ /usr/local/bin/
COPY --from=shfmt /build/ /usr/local/bin/
COPY --from=runc /build/ /usr/local/bin/
COPY --from=containerd /build/ /usr/local/bin/
COPY --from=rootlesskit /build/ /usr/local/bin/
COPY --from=vpnkit / /usr/local/bin/
COPY --from=crun /build/ /usr/local/bin/
COPY hack/dockerfile/etc/docker/ /etc/docker/
ENV PATH=/usr/local/cli:$PATH
ARG DOCKER_BUILDTAGS
ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}"
WORKDIR /go/src/github.com/docker/docker
VOLUME /var/lib/docker
VOLUME /home/unprivilegeduser/.local/share/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux
# Let us use a .bashrc file
RUN ln -sfv $PWD/.bashrc ~/.bashrc
# Add integration helpers to bashrc
RUN echo "source $PWD/hack/make/.integration-test-helpers" >> /etc/bash.bashrc
# Register Docker's bash completion.
RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
buildpack-deps:jessie@sha256:25785f89240fbcdd8a74bdaf30dd5599a9523882c6dfc567f2e9ef7cf6f79db6 \
busybox:latest@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 \
debian:jessie@sha256:f968f10b4b523737e253a97eac59b0d1420b5c19b69928d35801a6373ffe330e \
hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7
# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
# Install tomlv, vndr, runc, containerd, tini, docker-proxy
# Please edit hack/dockerfile/install-binaries.sh to update them.
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy bindata
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]
FROM dev-systemd-false AS dev-systemd-true
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
dbus \
dbus-user-session \
systemd \
systemd-sysv
RUN mkdir -p hack \
&& curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \
&& chmod +x hack/dind-systemd
ENTRYPOINT ["hack/dind-systemd"]
FROM dev-systemd-${SYSTEMD} AS dev
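Since the dev stage is selected through the SYSTEMD build-arg, a systemd-booting dev image could be produced roughly like this (the tag is arbitrary):

    # build and run a systemd-enabled dev container (sketch)
    docker build --build-arg SYSTEMD=true --target dev -t docker-dev:systemd .
    docker run --privileged -it docker-dev:systemd bash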
FROM runtime-dev AS binary-base
ARG DOCKER_GITCOMMIT=HEAD
ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT}
ARG VERSION
ENV VERSION=${VERSION}
ARG PLATFORM
ENV PLATFORM=${PLATFORM}
ARG PRODUCT
ENV PRODUCT=${PRODUCT}
ARG DEFAULT_PRODUCT_LICENSE
ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE}
ARG PACKAGER_NAME
ENV PACKAGER_NAME=${PACKAGER_NAME}
ARG DOCKER_BUILDTAGS
ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}"
ENV PREFIX=/build
# TODO: This is here because hack/make.sh binary copies these extras binaries
# from $PATH into the bundles dir.
# It would be nice to handle this in a different way.
COPY --from=tini /build/ /usr/local/bin/
COPY --from=runc /build/ /usr/local/bin/
COPY --from=containerd /build/ /usr/local/bin/
COPY --from=rootlesskit /build/ /usr/local/bin/
COPY --from=vpnkit / /usr/local/bin/
COPY --from=gowinres /build/ /usr/local/bin/
WORKDIR /go/src/github.com/docker/docker
FROM binary-base AS build-binary
RUN --mount=type=cache,target=/root/.cache \
--mount=type=bind,target=.,ro \
--mount=type=tmpfs,target=cli/winresources/dockerd \
--mount=type=tmpfs,target=cli/winresources/docker-proxy \
hack/make.sh binary
FROM binary-base AS build-dynbinary
RUN --mount=type=cache,target=/root/.cache \
--mount=type=bind,target=.,ro \
--mount=type=tmpfs,target=cli/winresources/dockerd \
--mount=type=tmpfs,target=cli/winresources/docker-proxy \
hack/make.sh dynbinary
FROM binary-base AS build-cross
ARG DOCKER_CROSSPLATFORMS
RUN --mount=type=cache,target=/root/.cache \
--mount=type=bind,target=.,ro \
--mount=type=tmpfs,target=cli/winresources/dockerd \
--mount=type=tmpfs,target=cli/winresources/docker-proxy \
hack/make.sh cross
FROM scratch AS binary
COPY --from=build-binary /build/bundles/ /
FROM scratch AS dynbinary
COPY --from=build-dynbinary /build/bundles/ /
FROM scratch AS cross
COPY --from=build-cross /build/bundles/ /
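Because binary, dynbinary, and cross are scratch stages holding only the bundles, BuildKit's local exporter can copy them straight to the host; a minimal sketch:

    # export the build results into ./bundles on the host
    DOCKER_BUILDKIT=1 docker build --target binary --output type=local,dest=bundles .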
FROM dev AS final
# Upload docker source
COPY . /go/src/github.com/docker/docker

Dockerfile.aarch64 Normal file

@@ -0,0 +1,201 @@
# This file describes the standard way to build Docker on aarch64, using docker
#
# Usage:
#
# # Assemble the full dev environment. This is slow the first time.
# docker build -t docker -f Dockerfile.aarch64 .
#
# # Mount your source in an interactive container for quick testing:
# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
#
# # Run the test suite:
# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py
#
# Note: AppArmor used to mess with privileged mode, but this is no longer
# the case. Therefore, you don't have to disable it anymore.
#
FROM aarch64/ubuntu:xenial
# Packaged dependencies
RUN apt-get update && apt-get install -y \
apparmor \
aufs-tools \
automake \
bash-completion \
btrfs-tools \
build-essential \
cmake \
createrepo \
curl \
dpkg-sig \
g++ \
gcc \
git \
iptables \
jq \
libapparmor-dev \
libc6-dev \
libcap-dev \
libltdl-dev \
libsystemd-dev \
libyaml-dev \
mercurial \
net-tools \
parallel \
pkg-config \
python-dev \
python-mock \
python-pip \
python-setuptools \
python-websocket \
golang-go \
iproute2 \
iputils-ping \
vim-common \
--no-install-recommends
# Get lvm2 source for compiling statically
ENV LVM2_VERSION 2.02.103
RUN mkdir -p /usr/local/lvm2 \
&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
| tar -xzC /usr/local/lvm2 --strip-components=1
# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
# Fix platform enablement in lvm2 to support aarch64 properly
RUN set -e \
&& for f in config.guess config.sub; do \
curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \
done
# "arch.c:78:2: error: #error the arch code needs to know about your machine type"
# Compile and install lvm2
RUN cd /usr/local/lvm2 \
&& ./configure \
--build="$(gcc -print-multiarch)" \
--enable-static_link \
&& make device-mapper \
&& make install_device-mapper
# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
# Install seccomp: the version shipped upstream is too old
ENV SECCOMP_VERSION 2.3.2
RUN set -x \
&& export SECCOMP_PATH="$(mktemp -d)" \
&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
&& ( \
cd "$SECCOMP_PATH" \
&& ./configure --prefix=/usr/local \
&& make \
&& make install \
&& ldconfig \
) \
&& rm -rf "$SECCOMP_PATH"
# Install Go
# We don't have official binary golang 1.7.5 tarballs for ARM64, either for Go
# itself or for bootstrap, so we use golang-go (1.6) as bootstrap to build Go
# from source code.
# We don't use the official ARMv6 released binaries as a GOROOT_BOOTSTRAP, because
# not all ARM64 platforms support 32-bit mode. 32-bit mode is optional for ARMv8.
ENV GO_VERSION 1.7.5
RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \
&& cd /usr/src/go/src \
&& GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash
ENV PATH /go/bin:/usr/src/go/bin:$PATH
ENV GOPATH /go
# Dependency for golint
ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3
RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \
&& (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT)
# Grab Go's lint tool
ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456
RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \
&& (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \
&& go install -v github.com/golang/lint/golint
# Only install one version of the registry: the old version that supports
# schema1 manifests does not work on ARM64, so the integration-cli tests
# for schema1 manifests have to be skipped on ARM64.
ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
&& rm -rf "$GOPATH"
# Install notary and notary-server
ENV NOTARY_VERSION v0.5.0
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
&& rm -rf "$GOPATH"
# Get the "docker-py" source so we can run their integration tests
ENV DOCKER_PY_COMMIT 4a08d04aef0595322e1b5ac7c52f28a931da85a5
RUN git clone https://github.com/docker/docker-py.git /docker-py \
&& cd /docker-py \
&& git checkout -q $DOCKER_PY_COMMIT \
&& pip install wheel \
# Before running the integration tests, conftest.py is
# loaded, which loads auth.py, which in turn imports
# the docker-pycreds module.
&& pip install docker-pycreds==0.2.1 \
&& pip install -r test-requirements.txt
# Install yamllint for validating swagger.yaml
RUN pip install yamllint==1.5.0
# Install go-swagger for validating swagger.yaml
ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb
RUN git clone https://github.com/go-swagger/go-swagger.git /go/src/github.com/go-swagger/go-swagger \
&& (cd /go/src/github.com/go-swagger/go-swagger && git checkout -q $GO_SWAGGER_COMMIT) \
&& go install -v github.com/go-swagger/go-swagger/cmd/swagger
# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'
# Add an unprivileged user to be used for tests which need it
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux
# Let us use a .bashrc file
RUN ln -sfv $PWD/.bashrc ~/.bashrc
# Register Docker's bash completion.
RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
aarch64/buildpack-deps:jessie@sha256:6aa1d6910791b7ac78265fd0798e5abd6cb3f27ae992f6f960f6c303ec9535f2 \
aarch64/busybox:latest@sha256:b23a6a37cf269dff6e46d2473b6e227afa42b037e6d23435f1d2bc40fc8c2828 \
aarch64/debian:jessie@sha256:4be74a41a7c70ebe887b634b11ffe516cf4fcd56864a54941e56bb49883c3170 \
aarch64/hello-world:latest@sha256:65a4a158587b307bb02db4de41b836addb0c35175bdc801367b1ac1ddeb9afda
# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
# Install tomlv, vndr, runc, containerd, tini, docker-proxy
# Please edit hack/dockerfile/install-binaries.sh to update them.
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]
# Upload docker source
COPY . /go/src/github.com/docker/docker

Dockerfile.armhf Normal file

@@ -0,0 +1,181 @@
# This file describes the standard way to build Docker on ARMv7, using docker
#
# Usage:
#
# # Assemble the full dev environment. This is slow the first time.
# docker build -t docker -f Dockerfile.armhf .
#
# # Mount your source in an interactive container for quick testing:
# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
#
# # Run the test suite:
# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py
#
# Note: AppArmor used to mess with privileged mode, but this is no longer
# the case. Therefore, you don't have to disable it anymore.
#
FROM armhf/debian:jessie
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
# Packaged dependencies
RUN apt-get update && apt-get install -y \
apparmor \
aufs-tools \
automake \
bash-completion \
btrfs-tools \
build-essential \
createrepo \
curl \
cmake \
dpkg-sig \
git \
iptables \
jq \
net-tools \
libapparmor-dev \
libcap-dev \
libltdl-dev \
libsystemd-journal-dev \
libtool \
mercurial \
pkg-config \
python-dev \
python-mock \
python-pip \
python-websocket \
xfsprogs \
tar \
vim-common \
--no-install-recommends \
&& pip install awscli==1.10.15
# Get lvm2 source for compiling statically
ENV LVM2_VERSION 2.02.103
RUN mkdir -p /usr/local/lvm2 \
&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
| tar -xzC /usr/local/lvm2 --strip-components=1
# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
# Compile and install lvm2
RUN cd /usr/local/lvm2 \
&& ./configure \
--build="$(gcc -print-multiarch)" \
--enable-static_link \
&& make device-mapper \
&& make install_device-mapper
# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
# Install Go
ENV GO_VERSION 1.7.5
RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \
| tar -xzC /usr/local
ENV PATH /go/bin:/usr/local/go/bin:$PATH
ENV GOPATH /go
# We're building for armhf, which is ARMv7, so let's be explicit about that
ENV GOARCH arm
ENV GOARM 7
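For illustration, the two variables above are what any build inside this image inherits; the output path is arbitrary:

    # equivalent to prefixing a manual build like so (sketch)
    GOARCH=arm GOARM=7 go build -o /tmp/dockerd-armv7 ./cmd/dockerd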
# Dependency for golint
ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3
RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \
&& (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT)
# Grab Go's lint tool
ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456
RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \
&& (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \
&& go install -v github.com/golang/lint/golint
# Install seccomp: the version shipped upstream is too old
ENV SECCOMP_VERSION 2.3.2
RUN set -x \
&& export SECCOMP_PATH="$(mktemp -d)" \
&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
&& ( \
cd "$SECCOMP_PATH" \
&& ./configure --prefix=/usr/local \
&& make \
&& make install \
&& ldconfig \
) \
&& rm -rf "$SECCOMP_PATH"
# Install two versions of the registry. The first is an older version that
# only supports schema1 manifests. The second is a newer version that supports
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
ENV REGISTRY_COMMIT cb08de17d74bef86ce6c5abe8b240e282f5750be
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \
&& rm -rf "$GOPATH"
# Install notary and notary-server
ENV NOTARY_VERSION v0.5.0
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
&& rm -rf "$GOPATH"
# Get the "docker-py" source so we can run their integration tests
ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324
RUN git clone https://github.com/docker/docker-py.git /docker-py \
&& cd /docker-py \
&& git checkout -q $DOCKER_PY_COMMIT \
&& pip install -r test-requirements.txt
# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'
# Add an unprivileged user to be used for tests which need it
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux
# Let us use a .bashrc file
RUN ln -sfv $PWD/.bashrc ~/.bashrc
# Register Docker's bash completion.
RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
armhf/buildpack-deps:jessie@sha256:ca6cce8e5bf5c952129889b5cc15cd6aa8d995d77e55e3749bbaadae50e476cb \
armhf/busybox:latest@sha256:d98a7343ac750ffe387e3d514f8521ba69846c216778919b01414b8617cfb3d4 \
armhf/debian:jessie@sha256:4a2187483f04a84f9830910fe3581d69b3c985cc045d9f01d8e2f3795b28107b \
armhf/hello-world:latest@sha256:161dcecea0225975b2ad5f768058212c1e0d39e8211098666ffa1ac74cfb7791
# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
# Install tomlv, vndr, runc, containerd, tini, docker-proxy
# Please edit hack/dockerfile/install-binaries.sh to update them.
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy
ENTRYPOINT ["hack/dind"]
# Upload docker source
COPY . /go/src/github.com/docker/docker


@@ -1,85 +0,0 @@
ARG GO_VERSION=1.19.3
FROM golang:${GO_VERSION}-alpine AS base
ENV GO111MODULE=off
RUN apk --no-cache add \
bash \
btrfs-progs-dev \
build-base \
curl \
lvm2-dev \
jq
RUN mkdir -p /build/
RUN mkdir -p /go/src/github.com/docker/docker/
WORKDIR /go/src/github.com/docker/docker/
FROM base AS frozen-images
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /
RUN /download-frozen-image-v2.sh /build \
busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \
debian:bullseye-slim@sha256:dacf278785a4daa9de07596ec739dbc07131e189942772210709c5c0777e8437 \
hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \
arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1
# See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list)
FROM base AS dockercli
COPY hack/dockerfile/install/install.sh ./install.sh
COPY hack/dockerfile/install/dockercli.installer ./
RUN PREFIX=/build ./install.sh dockercli
# TestDockerCLIBuildSuite dependency
FROM base AS contrib
COPY contrib/syscall-test /build/syscall-test
COPY contrib/httpserver/Dockerfile /build/httpserver/Dockerfile
COPY contrib/httpserver contrib/httpserver
RUN CGO_ENABLED=0 go build -buildmode=pie -o /build/httpserver/httpserver github.com/docker/docker/contrib/httpserver
# Build the integration tests and copy the resulting binaries to /build/tests
FROM base AS builder
# Set tag and add sources
COPY . .
# Copy test sources so that tests that use assert can print errors
RUN mkdir -p /build${PWD} && find integration integration-cli -name \*_test.go -exec cp --parents '{}' /build${PWD} \;
# Build and install test binaries
ARG DOCKER_GITCOMMIT=undefined
RUN hack/make.sh build-integration-test-binary
RUN mkdir -p /build/tests && find . -name test.main -exec cp --parents '{}' /build/tests \;
## Generate testing image
FROM alpine:3.10 as runner
ENV DOCKER_REMOTE_DAEMON=1
ENV DOCKER_INTEGRATION_DAEMON_DEST=/
ENTRYPOINT ["/scripts/run.sh"]
# Add an unprivileged user to be used for tests which need it
RUN addgroup docker && adduser -D -G docker unprivilegeduser -s /bin/ash
# GNU tar is used for generating the emptyfs image
RUN apk --no-cache add \
bash \
ca-certificates \
g++ \
git \
inetutils-ping \
iptables \
libcap2-bin \
pigz \
tar \
xz
COPY hack/test/e2e-run.sh /scripts/run.sh
COPY hack/make/.ensure-emptyfs /scripts/ensure-emptyfs.sh
COPY integration/testdata /tests/integration/testdata
COPY integration/build/testdata /tests/integration/build/testdata
COPY integration-cli/fixtures /tests/integration-cli/fixtures
COPY --from=frozen-images /build/ /docker-frozen-images
COPY --from=dockercli /build/ /usr/bin/
COPY --from=contrib /build/ /tests/contrib/
COPY --from=builder /build/ /
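Given DOCKER_REMOTE_DAEMON=1, the resulting image is meant to drive the tests against an external daemon; a hedged invocation where both the image name and the daemon address are placeholders:

    # run the e2e suite against a remote daemon (illustrative)
    docker run --rm -e DOCKER_HOST=tcp://daemon-host:2375 moby-e2e:latest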

Dockerfile.ppc64le Normal file

@@ -0,0 +1,188 @@
# This file describes the standard way to build Docker on ppc64le, using docker
#
# Usage:
#
# # Assemble the full dev environment. This is slow the first time.
# docker build -t docker -f Dockerfile.ppc64le .
#
# # Mount your source in an interactive container for quick testing:
# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
#
# # Run the test suite:
# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py
#
# Note: AppArmor used to mess with privileged mode, but this is no longer
# the case. Therefore, you don't have to disable it anymore.
#
FROM ppc64le/debian:jessie
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
# Packaged dependencies
RUN apt-get update && apt-get install -y \
apparmor \
apt-utils \
aufs-tools \
automake \
bash-completion \
btrfs-tools \
build-essential \
cmake \
createrepo \
curl \
dpkg-sig \
git \
iptables \
jq \
net-tools \
libapparmor-dev \
libcap-dev \
libltdl-dev \
libsystemd-journal-dev \
libtool \
mercurial \
pkg-config \
python-dev \
python-mock \
python-pip \
python-websocket \
xfsprogs \
tar \
vim-common \
--no-install-recommends
# Get lvm2 source for compiling statically
ENV LVM2_VERSION 2.02.103
RUN mkdir -p /usr/local/lvm2 \
&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
| tar -xzC /usr/local/lvm2 --strip-components=1
# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
# Fix platform enablement in lvm2 to support ppc64le properly
RUN set -e \
&& for f in config.guess config.sub; do \
curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \
done
# "arch.c:78:2: error: #error the arch code needs to know about your machine type"
# Compile and install lvm2
RUN cd /usr/local/lvm2 \
&& ./configure \
--build="$(gcc -print-multiarch)" \
--enable-static_link \
&& make device-mapper \
&& make install_device-mapper
# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
# Install seccomp: the version shipped upstream is too old
ENV SECCOMP_VERSION 2.3.2
RUN set -x \
&& export SECCOMP_PATH="$(mktemp -d)" \
&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
&& ( \
cd "$SECCOMP_PATH" \
&& ./configure --prefix=/usr/local \
&& make \
&& make install \
&& ldconfig \
) \
&& rm -rf "$SECCOMP_PATH"
# Install Go
# NOTE: official ppc64le go binaries weren't available until go 1.6.4 and 1.7.4
ENV GO_VERSION 1.7.5
RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" \
| tar -xzC /usr/local
ENV PATH /go/bin:/usr/local/go/bin:$PATH
ENV GOPATH /go
# Dependency for golint
ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3
RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \
&& (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT)
# Grab Go's lint tool
ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456
RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \
&& (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \
&& go install -v github.com/golang/lint/golint
# Install two versions of the registry. The first is an older version that
# only supports schema1 manifests. The second is a newer version that supports
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \
&& rm -rf "$GOPATH"
# Install notary and notary-server
ENV NOTARY_VERSION v0.5.0
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
&& GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
&& GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
&& rm -rf "$GOPATH"
# Get the "docker-py" source so we can run their integration tests
ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324
RUN git clone https://github.com/docker/docker-py.git /docker-py \
&& cd /docker-py \
&& git checkout -q $DOCKER_PY_COMMIT \
&& pip install -r test-requirements.txt
# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'
# Add an unprivileged user to be used for tests which need it
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux
# Let us use a .bashrc file
RUN ln -sfv $PWD/.bashrc ~/.bashrc
# Register Docker's bash completion.
RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
ppc64le/buildpack-deps:jessie@sha256:902bfe4ef1389f94d143d64516dd50a2de75bca2e66d4a44b1d73f63ddf05dda \
ppc64le/busybox:latest@sha256:38bb82085248d5a3c24bd7a5dc146f2f2c191e189da0441f1c2ca560e3fc6f1b \
ppc64le/debian:jessie@sha256:412845f51b6ab662afba71bc7a716e20fdb9b84f185d180d4c7504f8a75c4f91 \
ppc64le/hello-world:latest@sha256:186a40a9a02ca26df0b6c8acdfb8ac2f3ae6678996a838f977e57fac9d963974
# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
# Install tomlv, vndr, runc, containerd, tini, docker-proxy
# Please edit hack/dockerfile/install-binaries.sh to update them.
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]
# Upload docker source
COPY . /go/src/github.com/docker/docker

Dockerfile.s390x Normal file

@@ -0,0 +1,181 @@
# This file describes the standard way to build Docker on s390x, using docker
#
# Usage:
#
# # Assemble the full dev environment. This is slow the first time.
# docker build -t docker -f Dockerfile.s390x .
#
# # Mount your source in an interactive container for quick testing:
# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
#
# # Run the test suite:
# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py
#
# Note: AppArmor used to mess with privileged mode, but this is no longer
# the case. Therefore, you don't have to disable it anymore.
#
FROM s390x/debian:jessie
# Packaged dependencies
RUN apt-get update && apt-get install -y \
apparmor \
apt-utils \
aufs-tools \
automake \
bash-completion \
btrfs-tools \
build-essential \
cmake \
createrepo \
curl \
dpkg-sig \
git \
iptables \
jq \
net-tools \
libapparmor-dev \
libcap-dev \
libltdl-dev \
libsystemd-journal-dev \
libtool \
mercurial \
pkg-config \
python-dev \
python-mock \
python-pip \
python-websocket \
xfsprogs \
tar \
vim-common \
--no-install-recommends
# Install seccomp: the version shipped upstream is too old
ENV SECCOMP_VERSION 2.3.2
RUN set -x \
&& export SECCOMP_PATH="$(mktemp -d)" \
&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
&& ( \
cd "$SECCOMP_PATH" \
&& ./configure --prefix=/usr/local \
&& make \
&& make install \
&& ldconfig \
) \
&& rm -rf "$SECCOMP_PATH"
# Get lvm2 source for compiling statically
ENV LVM2_VERSION 2.02.103
RUN mkdir -p /usr/local/lvm2 \
&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
| tar -xzC /usr/local/lvm2 --strip-components=1
# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
# Fix platform enablement in lvm2 to support s390x properly
RUN set -e \
&& for f in config.guess config.sub; do \
curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \
done
# "arch.c:78:2: error: #error the arch code needs to know about your machine type"
# Compile and install lvm2
RUN cd /usr/local/lvm2 \
&& ./configure \
--build="$(gcc -print-multiarch)" \
--enable-static_link \
&& make device-mapper \
&& make install_device-mapper
# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
ENV GO_VERSION 1.7.5
RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \
| tar -xzC /usr/local
ENV PATH /go/bin:/usr/local/go/bin:$PATH
ENV GOPATH /go
# Dependency for golint
ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3
RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \
&& (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT)
# Grab Go's lint tool
ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456
RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \
&& (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \
&& go install -v github.com/golang/lint/golint
# Install two versions of the registry. The first is an older version that
# only supports schema1 manifests. The second is a newer version that supports
# both. This allows integration-cli tests to cover push/pull with both schema1
# and schema2 manifests.
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \
&& rm -rf "$GOPATH"
# Install notary and notary-server
ENV NOTARY_VERSION v0.5.0
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
&& rm -rf "$GOPATH"
# Get the "docker-py" source so we can run their integration tests
ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324
RUN git clone https://github.com/docker/docker-py.git /docker-py \
&& cd /docker-py \
&& git checkout -q $DOCKER_PY_COMMIT \
&& pip install -r test-requirements.txt
# Set user.email so crosbymichael's in-container merge commits go smoothly
RUN git config --global user.email 'docker-dummy@example.com'
# Add an unprivileged user to be used for tests which need it
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/docker/docker
ENV DOCKER_BUILDTAGS apparmor selinux seccomp
# Let us use a .bashrc file
RUN ln -sfv $PWD/.bashrc ~/.bashrc
# Register Docker's bash completion.
RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
s390x/buildpack-deps:jessie@sha256:4d1381224acaca6c4bfe3604de3af6972083a8558a99672cb6989c7541780099 \
s390x/busybox:latest@sha256:dd61522c983884a66ed72d60301925889028c6d2d5e0220a8fe1d9b4c6a4f01b \
s390x/debian:jessie@sha256:b74c863400909eff3c5e196cac9bfd1f6333ce47aae6a38398d87d5875da170a \
s390x/hello-world:latest@sha256:780d80b3a7677c3788c0d5cd9168281320c8d4a6d9183892d8ee5cdd610f5699
# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
# Install tomlv, vndr, runc, containerd, tini, docker-proxy
# Please edit hack/dockerfile/install-binaries.sh to update them.
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]
# Upload docker source
COPY . /go/src/github.com/docker/docker


@@ -1,17 +1,11 @@
# docker build -t docker:simple -f Dockerfile.simple .
# docker run --rm docker:simple hack/make.sh dynbinary
# docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit
# docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration
# docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration-cli
# This represents the bare minimum required to build and test Docker.
ARG GO_VERSION=1.19.3
ARG BASE_DEBIAN_DISTRO="bullseye"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
FROM ${GOLANG_IMAGE}
ENV GO111MODULE=off
FROM debian:jessie
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
@@ -21,34 +15,56 @@ RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies
# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
btrfs-tools \
build-essential \
curl \
cmake \
gcc \
git \
libapparmor-dev \
libbtrfs-dev \
libdevmapper-dev \
libseccomp-dev \
ca-certificates \
e2fsprogs \
iptables \
pkg-config \
pigz \
procps \
xfsprogs \
xz-utils \
\
aufs-tools \
vim-common \
&& rm -rf /var/lib/apt/lists/*
# Install seccomp: the version shipped upstream is too old
ENV SECCOMP_VERSION 2.3.2
RUN set -x \
&& export SECCOMP_PATH="$(mktemp -d)" \
&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
&& ( \
cd "$SECCOMP_PATH" \
&& ./configure --prefix=/usr/local \
&& make \
&& make install \
&& ldconfig \
) \
&& rm -rf "$SECCOMP_PATH"
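A quick sanity check that the from-source build is the one the toolchain will link against (a sketch; assumes pkg-config is consulted with /usr/local on its search path, as arranged by the --prefix above):

```bash
# Should report 2.3.2 - the version built above, not the distro package
PKG_CONFIG_PATH=/usr/local/lib/pkgconfig pkg-config --modversion libseccomp
```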
# Install Go
# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines
# will need updating to avoid errors. Ping #docker-maintainers on IRC
# with a heads-up.
ENV GO_VERSION 1.7.5
RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \
| tar -xzC /usr/local
ENV PATH /go/bin:/usr/local/go/bin:$PATH
ENV GOPATH /go
ENV CGO_LDFLAGS -L/lib
# Install runc, containerd, tini and docker-proxy
# Please edit hack/dockerfile/install/<name>.installer to update them.
COPY hack/dockerfile/install hack/dockerfile/install
RUN for i in runc containerd tini proxy dockercli; \
do hack/dockerfile/install/install.sh $i; \
done
ENV PATH=/usr/local/cli:$PATH
# Please edit hack/dockerfile/install-binaries.sh to update them.
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
RUN /tmp/install-binaries.sh runc containerd tini proxy
ENV AUTO_GOPATH 1
WORKDIR /usr/src/docker

Dockerfile.solaris (new file, 20 lines)

@@ -0,0 +1,20 @@
# Defines an image that hosts a native Docker build environment for Solaris
# TODO: Improve stub
FROM solaris:latest
# compile and runtime deps
RUN pkg install --accept \
git \
gnu-coreutils \
gnu-make \
gnu-tar \
diagnostic/top \
golang \
library/golang/* \
developer/gcc-*
ENV GOPATH /go/:/usr/lib/gocode/1.5/
ENV DOCKER_CROSSPLATFORMS solaris/amd64
WORKDIR /go/src/github.com/docker/docker
COPY . /go/src/github.com/docker/docker

Dockerfile.windows

@@ -45,8 +45,8 @@
#
# 1. Clone the sources from github.com:
#
# >> git clone https://github.com/docker/docker.git C:\gopath\src\github.com\docker\docker
# >> Cloning into 'C:\gopath\src\github.com\docker\docker'...
# >> git clone https://github.com/docker/docker.git C:\go\src\github.com\docker\docker
# >> Cloning into 'C:\go\src\github.com\docker\docker'...
# >> remote: Counting objects: 186216, done.
# >> remote: Compressing objects: 100% (21/21), done.
# >> remote: Total 186216 (delta 5), reused 0 (delta 0), pack-reused 186195
@@ -59,7 +59,7 @@
#
# 2. Change directory to the cloned docker sources:
#
# >> cd C:\gopath\src\github.com\docker\docker
# >> cd C:\go\src\github.com\docker\docker
#
#
# 3. Build a docker image with the components required to build the docker binaries from source
@@ -79,8 +79,8 @@
# 5. Copy the binaries out of the container, replacing HostPath with an appropriate destination
# folder on the host system where you want the binaries to be located.
#
# >> docker cp binaries:C:\gopath\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe
# >> docker cp binaries:C:\gopath\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe
# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe
# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe
#
#
# 6. (Optional) Remove the interim container holding the built executable binaries:
@@ -132,8 +132,8 @@
# Important notes:
# ---------------
#
# Don't attempt to use a bind mount to pass a local directory as the bundles target
# directory. It does not work (golang attempts to follow a mapped folder incorrectly).
# Don't attempt to use a bind-mount to pass a local directory as the bundles target
# directory. It does not work (golang attempts to follow a mapped folder incorrectly).
# Instead, use docker cp as per the example.
#
# go.zip is not removed from the image as it is used by the Windows CI servers
@@ -147,41 +147,24 @@
# The docker integration tests do not currently run in a container on Windows, predominantly
# due to Windows not supporting privileged mode, so anything using a volume would fail.
# They (along with the rest of the docker CI suite) can be run using
# https://github.com/kevpar/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1.
# https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1.
#
# -----------------------------------------------------------------------------------------
# The number of build steps below is explicitly minimised to improve performance.
# Extremely important - do not change the following line to reference a "specific" image,
# such as `mcr.microsoft.com/windows/servercore:ltsc2022`. If using this Dockerfile in process
# isolated containers, the kernel of the host must match the container image, and hence
# a build would fail between Windows Server 2016 (aka RS1) and Windows Server 2019 (aka RS5).
# It is expected that the image `microsoft/windowsservercore:latest` is present, and matches
# the host's kernel version, before doing a build (a pre-flight check is sketched after this Dockerfile).
FROM microsoft/windowsservercore
# Use PowerShell as the default shell
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]
ARG GO_VERSION=1.19.3
ARG GOTESTSUM_VERSION=v1.8.2
ARG GOWINRES_VERSION=v0.3.0
ARG CONTAINERD_VERSION=v1.6.10
# Environment variable notes:
# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
# - CONTAINERD_VERSION must be consistent with 'hack/dockerfile/install/containerd.installer' used by Linux.
# - FROM_DOCKERFILE is used for detection of building within a container.
ENV GO_VERSION=${GO_VERSION} `
CONTAINERD_VERSION=${CONTAINERD_VERSION} `
ENV GO_VERSION=1.7.5 `
GIT_VERSION=2.11.1 `
GOPATH=C:\gopath `
GO111MODULE=off `
FROM_DOCKERFILE=1 `
GOTESTSUM_VERSION=${GOTESTSUM_VERSION} `
GOWINRES_VERSION=${GOWINRES_VERSION}
GOPATH=C:\go `
FROM_DOCKERFILE=1
RUN `
Function Test-Nano() { `
@@ -210,30 +193,28 @@ RUN `
Throw ("Failed to download " + $source) `
}`
} else { `
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; `
$webClient = New-Object System.Net.WebClient; `
$webClient.DownloadFile($source, $target); `
} `
} `
`
setx /M PATH $('C:\git\cmd;C:\git\usr\bin;'+$Env:PATH+';C:\gcc\bin;C:\go\bin;C:\containerd\bin'); `
setx /M PATH $('C:\git\cmd;C:\git\usr\bin;'+$Env:PATH+';C:\gcc\bin;C:\go\bin'); `
`
Write-Host INFO: Downloading git...; `
$location='https://www.nuget.org/api/v2/package/GitForWindows/'+$Env:GIT_VERSION; `
Download-File $location C:\gitsetup.zip; `
`
Write-Host INFO: Downloading go...; `
$dlGoVersion=$Env:GO_VERSION -replace '\.0$',''; `
Download-File "https://golang.org/dl/go${dlGoVersion}.windows-amd64.zip" C:\go.zip; `
Download-File $('https://golang.org/dl/go'+$Env:GO_VERSION+'.windows-amd64.zip') C:\go.zip; `
`
Write-Host INFO: Downloading compiler 1 of 3...; `
Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/gcc.zip C:\gcc.zip; `
Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/gcc.zip C:\gcc.zip; `
`
Write-Host INFO: Downloading compiler 2 of 3...; `
Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/runtime.zip C:\runtime.zip; `
Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/runtime.zip C:\runtime.zip; `
`
Write-Host INFO: Downloading compiler 3 of 3...; `
Download-File https://raw.githubusercontent.com/moby/docker-tdmgcc/master/binutils.zip C:\binutils.zip; `
Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/binutils.zip C:\binutils.zip; `
`
Write-Host INFO: Extracting git...; `
Expand-Archive C:\gitsetup.zip C:\git-tmp; `
@@ -257,61 +238,19 @@ RUN `
Remove-Item C:\binutils.zip; `
Remove-Item C:\gitsetup.zip; `
`
Write-Host INFO: Downloading containerd; `
Install-Package -Force 7Zip4PowerShell; `
$location='https://github.com/containerd/containerd/releases/download/'+$Env:CONTAINERD_VERSION+'/containerd-'+$Env:CONTAINERD_VERSION.TrimStart('v')+'-windows-amd64.tar.gz'; `
Download-File $location C:\containerd.tar.gz; `
New-Item -Path C:\containerd -ItemType Directory; `
Expand-7Zip C:\containerd.tar.gz C:\; `
Expand-7Zip C:\containerd.tar C:\containerd; `
Remove-Item C:\containerd.tar.gz; `
Remove-Item C:\containerd.tar; `
`
# Ensure all directories exist that we will require below....
$srcDir = """$Env:GOPATH`\src\github.com\docker\docker\bundles"""; `
Write-Host INFO: Ensuring existence of directory $srcDir...; `
New-Item -Force -ItemType Directory -Path $srcDir | Out-Null; `
Write-Host INFO: Creating source directory...; `
New-Item -ItemType Directory -Path C:\go\src\github.com\docker\docker | Out-Null; `
`
Write-Host INFO: Configuring git core.autocrlf...; `
C:\git\cmd\git config --global core.autocrlf true;
RUN `
Function Install-GoTestSum() { `
$Env:GO111MODULE = 'on'; `
$tmpGobin = "${Env:GOBIN_TMP}"; `
$Env:GOBIN = """${Env:GOPATH}`\bin"""; `
Write-Host "INFO: Installing gotestsum version $Env:GOTESTSUM_VERSION in $Env:GOBIN"; `
&go install "gotest.tools/gotestsum@${Env:GOTESTSUM_VERSION}"; `
$Env:GOBIN = "${tmpGobin}"; `
$Env:GO111MODULE = 'off'; `
if ($LASTEXITCODE -ne 0) { `
Throw '"gotestsum install failed..."'; `
} `
} `
C:\git\cmd\git config --global core.autocrlf true; `
`
Install-GoTestSum
RUN `
Function Install-GoWinres() { `
$Env:GO111MODULE = 'on'; `
$tmpGobin = "${Env:GOBIN_TMP}"; `
$Env:GOBIN = """${Env:GOPATH}`\bin"""; `
Write-Host "INFO: Installing go-winres version $Env:GOWINRES_VERSION in $Env:GOBIN"; `
&go install "github.com/tc-hib/go-winres@${Env:GOWINRES_VERSION}"; `
$Env:GOBIN = "${tmpGobin}"; `
$Env:GO111MODULE = 'off'; `
if ($LASTEXITCODE -ne 0) { `
Throw '"go-winres install failed..."'; `
} `
} `
`
Install-GoWinres
Write-Host INFO: Completed
# Make PowerShell the default entrypoint
ENTRYPOINT ["powershell.exe"]
# Set the working directory to the location of the sources
WORKDIR ${GOPATH}\src\github.com\docker\docker
WORKDIR C:\go\src\github.com\docker\docker
# Copy the sources into the container
COPY . .
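As promised above, a hedged sketch of the pre-flight check the kernel-match note implies. The format strings are assumptions about the relevant `docker version`/`docker inspect` fields (OsVersion is populated for Windows images); the two values must line up for process-isolated containers:

```bash
# Kernel build the Windows daemon is running on
docker version --format '{{.Server.KernelVersion}}'
# OS build the base image targets; compare with the value above
docker image inspect --format '{{.OsVersion}}' microsoft/windowsservercore:latest
```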

Jenkinsfile (vendored, 563 lines removed)

@@ -1,563 +0,0 @@
#!groovy
pipeline {
agent none
options {
buildDiscarder(logRotator(daysToKeepStr: '30'))
timeout(time: 2, unit: 'HOURS')
timestamps()
}
parameters {
booleanParam(name: 'arm64', defaultValue: true, description: 'ARM (arm64) Build/Test')
booleanParam(name: 's390x', defaultValue: false, description: 'IBM Z (s390x) Build/Test')
booleanParam(name: 'ppc64le', defaultValue: false, description: 'PowerPC (ppc64le) Build/Test')
booleanParam(name: 'dco', defaultValue: true, description: 'Run the DCO check')
}
environment {
DOCKER_BUILDKIT = '1'
DOCKER_EXPERIMENTAL = '1'
DOCKER_GRAPHDRIVER = 'overlay2'
APT_MIRROR = 'cdn-fastly.deb.debian.org'
CHECK_CONFIG_COMMIT = '33a3680e08d1007e72c3b3f1454f823d8e9948ee'
TESTDEBUG = '0'
TIMEOUT = '120m'
}
stages {
stage('pr-hack') {
when { changeRequest() }
steps {
script {
echo "Workaround for PR auto-cancel feature. Borrowed from https://issues.jenkins-ci.org/browse/JENKINS-43353"
def buildNumber = env.BUILD_NUMBER as int
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)
}
}
}
stage('DCO-check') {
when {
beforeAgent true
expression { params.dco }
}
agent { label 'arm64 && ubuntu-2004' }
steps {
sh '''
docker run --rm \
-v "$WORKSPACE:/workspace" \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
alpine sh -c 'apk add --no-cache -q bash git openssh-client && git config --system --add safe.directory /workspace && cd /workspace && hack/validate/dco'
'''
}
}
stage('Build') {
parallel {
stage('s390x') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.s390x }
}
}
agent { label 's390x-ubuntu-2004' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x integration-cli') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.s390x }
}
}
agent { label 's390x-ubuntu-2004' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.ppc64le }
}
}
agent { label 'ppc64le-ubuntu-1604' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le integration-cli') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.ppc64le }
}
}
agent { label 'ppc64le-ubuntu-1604' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('arm64') {
when {
beforeAgent true
expression { params.arm64 }
}
agent { label 'arm64 && ubuntu-2004' }
environment {
TEST_SKIP_INTEGRATION_CLI = '1'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=arm64-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
}
}
}
}

LICENSE

@@ -176,7 +176,7 @@
END OF TERMS AND CONDITIONS
Copyright 2013-2018 Docker, Inc.
Copyright 2013-2017 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

MAINTAINERS

@@ -1,14 +1,12 @@
# Moby maintainers file
# Docker maintainers file
#
# This file describes the maintainer groups within the moby/moby project.
# More detail on Moby project governance is available in the
# project/GOVERNANCE.md file found in this repository.
# This file describes who runs the docker/docker project and how.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant
# parser.
#
# TODO(estesp): This file should not necessarily depend on docker/opensource
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
@@ -23,20 +21,30 @@
# a subsystem, they are responsible for doing so and holding the
# subsystem maintainers accountable. If ownership is unclear, they are the de facto owners.
# For each release (including minor releases), a "release captain" is assigned from the
# pool of core maintainers. Rotation is encouraged across all maintainers, to ensure
# the release process is clear and up-to-date.
people = [
"aaronlehmann",
"akihirosuda",
"aluzzardi",
"anusha",
"coolljt0725",
"cpuguy83",
"crosbymichael",
"dnephin",
"duglin",
"estesp",
"johnstep",
"icecrime",
"jhowardmsft",
"justincormack",
"kolyshkin",
"lk4d4",
"mavenugo",
"mhbauer",
"mlaventure",
"runcom",
"samuelkarp",
"stevvooe",
"thajeztah",
"tianon",
"tibor",
"tonistiigi",
@@ -46,6 +54,15 @@
"yongtang"
]
[Org."Docs maintainers"]
# TODO Describe the docs maintainers role.
people = [
"misty",
"thajeztah"
]
[Org.Curators]
# The curators help ensure that incoming issues and pull requests are properly triaged and
@@ -59,18 +76,17 @@
# - close an issue or pull request when it's inappropriate or off-topic
people = [
"aboch",
"alexellis",
"andrewhsu",
"corhere",
"anonymuse",
"chanwit",
"ehazlett",
"fntlnz",
"gianarb",
"ndeloof",
"neersighted",
"olljanat",
"mgoelzer",
"programmerq",
"ripcurld",
"rumpl",
"samwhited",
"rheinwein",
"thajeztah"
]
@@ -81,22 +97,6 @@
# Thank you!
people = [
# Aaron Lehmann was a maintainer for swarmkit, the registry, and the engine,
# and contributed many improvements, features, and bugfixes in those areas,
# among which "automated service rollbacks", templated secrets and configs,
# and resumable image layer downloads.
"aaronlehmann",
# Harald Albers is the mastermind behind the bash completion scripts for the
# Docker CLI. The completion scripts moved to the Docker CLI repository, so
# you can now find him perform his magic in the https://github.com/docker/cli repository.
"albers",
# Andrea Luzzardi started contributing to the Docker codebase in the "dotCloud"
# era, even before it was called "Docker". He is one of the architects of both
# Swarm and SwarmKit, and its integration into the Docker engine.
"aluzzardi",
# David Calavera contributed many features to Docker, such as an improved
# event system, dynamic configuration reloading, volume plugins, fancy
# new templating options, and an external client credential store. As a
@@ -106,30 +106,6 @@
# and tweets as @calavera.
"calavera",
# Michael Crosby was "chief maintainer" of the Docker project.
# During his time as a maintainer, Michael contributed to many
# milestones of the project; he was release captain of Docker v1.0.0,
# started the development of "libcontainer" (what later became runc)
# and containerd, as well as demoing cool hacks such as live migrating
# a game server container with checkpoint/restore.
#
# Michael is currently a maintainer of containerd, but you may see
# him around in other projects on GitHub.
"crosbymichael",
# Before becoming a maintainer, Daniel Nephin was a core contributor
# to "Fig" (now known as Docker Compose). As a maintainer for both the
# Engine and Docker CLI, Daniel contributed many features, among which
# the `docker stack` commands, allowing users to deploy their Docker
# Compose projects as a Swarm service.
"dnephin",
# Doug Davis contributed many features and fixes for the classic builder,
# such as "wildcard" copy, the dockerignore file, custom paths/names
# for the Dockerfile, as well as enhancements to the API and documentation.
# Follow Doug on Twitter, where he tweets as @duginabox.
"duglin",
# As a maintainer, Erik was responsible for the "builder", and
# started the first designs for the new networking model in
# Docker. Erik is now working on all kinds of plugins for Docker
@@ -138,24 +114,6 @@
# still stumble into him in our issue tracker, or on IRC.
"erikh",
# Evan Hazlett is the creator of the Shipyard and Interlock open source projects,
# and the author of "Orca", which became the foundation of Docker Universal Control
# Plane (UCP). As a maintainer, Evan helped integrating SwarmKit (secrets, tasks)
# into the Docker engine.
"ehazlett",
# Arnaud Porterie (AKA "icecrime") was in charge of maintaining the maintainers.
# As a maintainer, he made life easier for contributors to the Docker open-source
# projects, bringing order in the chaos by designing a triage- and review workflow
# using labels (see https://icecrime.net/technology/a-structured-approach-to-labeling/),
# and automating the hell out of things with his buddies GordonTheTurtle and Poule
# (a chicken!).
#
# A lesser-known fact is that he created the first commit in the libnetwork repository
# even though he didn't know anything about it. Some say, he's now selling stuff on
# the internet ;-)
"icecrime",
# After a false start with his first PR being rejected, James Turnbull became a frequent
# contributor to the documentation, and became a docs maintainer on December 5, 2013. As
# a maintainer, James lifted the docs to a higher standard, and introduced the community
@@ -175,41 +133,13 @@
# containers a lot more secure). Besides being a maintainer, she
# set up the CI infrastructure for the project, giving everyone
# something to shout at if a PR failed ("noooo Janky!").
# Jess is currently working on the DCOS security team at Mesosphere,
# and contributing to various open source projects.
# Be sure you don't miss her talks at a conference near you (a must-see),
# read her blog at https://blog.jessfraz.com (a must-read), and
# check out her open source projects on GitHub https://github.com/jessfraz (a must-try).
"jessfraz",
# As a maintainer, John Howard managed to make the impossible possible;
# to run Docker on Windows. After facing many challenges, teaching
# fellow-maintainers that 'Windows is not Linux', and many changes in
# Windows Server to facilitate containers, native Windows containers
# saw the light of day in 2015.
#
# John is now enjoying life without containers: playing piano, painting,
# and walking his dogs, but you may occasionally see him drop by on GitHub.
"lowenna",
# Alexander Morozov contributed many features to Docker, worked on the premise of
# what later became containerd (and worked on that too), and made a "stupid" Go
# vendor tool specifically for docker/docker needs: vndr (https://github.com/LK4D4/vndr).
# Not many know that Alexander is a master negotiator, being able to change course
# of action with a single "Nope, we're not gonna do that".
"lk4d4",
# Madhu Venugopal was part of the SocketPlane team that joined Docker.
# As a maintainer, he was working with Jana for the Container Network
# Model (CNM) implemented through libnetwork, and the "routing mesh" powering
# Swarm mode networking.
"mavenugo",
# As a maintainer, Kenfe-Mickaël Laventure worked on the container runtime,
# integrating containerd 1.0 with the daemon, and adding support for custom
# OCI runtimes, as well as implementing the `docker prune` subcommands,
# which was a welcome feature to be added. You can keep up with Mickaél on
# Twitter (@kmlaventure).
"mlaventure",
# As a docs maintainer, Mary Anthony contributed greatly to the Docker
# docs. She wrote the Docker Contributor Guide and Getting Started
# Guides. She helped create a doc build system independent of
@@ -277,6 +207,11 @@
Email = "aaron.lehmann@docker.com"
GitHub = "aaronlehmann"
[people.aboch]
Name = "Alessandro Boch"
Email = "aboch@docker.com"
GitHub = "aboch"
[people.alexellis]
Name = "Alex Ellis"
Email = "alexellis2@gmail.com"
@@ -284,7 +219,7 @@
[people.akihirosuda]
Name = "Akihiro Suda"
Email = "akihiro.suda.cz@hco.ntt.co.jp"
Email = "suda.akihiro@lab.ntt.co.jp"
GitHub = "AkihiroSuda"
[people.aluzzardi]
@@ -292,16 +227,16 @@
Email = "al@docker.com"
GitHub = "aluzzardi"
[people.albers]
Name = "Harald Albers"
Email = "github@albersweb.de"
GitHub = "albers"
[people.andrewhsu]
Name = "Andrew Hsu"
Email = "andrewhsu@docker.com"
GitHub = "andrewhsu"
[people.anonymuse]
Name = "Jesse White"
Email = "anonymuse@gmail.com"
GitHub = "anonymuse"
[people.anusha]
Name = "Anusha Ragunathan"
Email = "anusha@docker.com"
@@ -317,15 +252,15 @@
Email = "leijitang@huawei.com"
GitHub = "coolljt0725"
[people.corhere]
Name = "Cory Snider"
Email = "csnider@mirantis.com"
GitHub = "corhere"
[people.cpuguy83]
Name = "Brian Goff"
Email = "cpuguy83@gmail.com"
GitHub = "cpuguy83"
Github = "cpuguy83"
[people.chanwit]
Name = "Chanwit Kaewkasi"
Email = "chanwit@gmail.com"
GitHub = "chanwit"
[people.crosbymichael]
Name = "Michael Crosby"
@@ -369,7 +304,7 @@
[people.icecrime]
Name = "Arnaud Porterie"
Email = "icecrime@gmail.com"
Email = "arnaud@docker.com"
GitHub = "icecrime"
[people.jamtur01]
@@ -377,49 +312,49 @@
Email = "james@lovedthanlost.net"
GitHub = "jamtur01"
[people.jhowardmsft]
Name = "John Howard"
Email = "jhoward@microsoft.com"
GitHub = "jhowardmsft"
[people.jessfraz]
Name = "Jessie Frazelle"
Email = "jess@linux.com"
GitHub = "jessfraz"
[people.johnstep]
Name = "John Stephens"
Email = "johnstep@docker.com"
GitHub = "johnstep"
[people.justincormack]
Name = "Justin Cormack"
Email = "justin.cormack@docker.com"
GitHub = "justincormack"
[people.kolyshkin]
Name = "Kir Kolyshkin"
Email = "kolyshkin@gmail.com"
GitHub = "kolyshkin"
[people.lk4d4]
Name = "Alexander Morozov"
Email = "lk4d4@docker.com"
GitHub = "lk4d4"
[people.lowenna]
Name = "John Howard"
Email = "github@lowenna.com"
GitHub = "lowenna"
[people.mavenugo]
Name = "Madhu Venugopal"
Email = "madhu@docker.com"
GitHub = "mavenugo"
[people.mgoelzer]
Name = "Mike Goelzer"
Email = "mike.goelzer@docker.com"
GitHub = "mgoelzer"
[people.mhbauer]
Name = "Morgan Bauer"
Email = "mbauer@us.ibm.com"
GitHub = "mhbauer"
[people.misty]
Name = "Misty Stanley-Jones"
Email = "misty@docker.com"
GitHub = "mstanleyjones"
[people.mlaventure]
Name = "Kenfe-Mickaël Laventure"
Email = "mickael.laventure@gmail.com"
Email = "mickael.laventure@docker.com"
GitHub = "mlaventure"
[people.moxiegirl]
@@ -432,51 +367,21 @@
Email = "mrjana@docker.com"
GitHub = "mrjana"
[people.ndeloof]
Name = "Nicolas De Loof"
Email = "nicolas.deloof@gmail.com"
GitHub = "ndeloof"
[people.neersighted]
Name = "Bjorn Neergaard"
Email = "bneergaard@mirantis.com"
GitHub = "neersighted"
[people.olljanat]
Name = "Olli Janatuinen"
Email = "olli.janatuinen@gmail.com"
GitHub = "olljanat"
[people.programmerq]
Name = "Jeff Anderson"
Email = "jeff@docker.com"
GitHub = "programmerq"
[people.ripcurld]
Name = "Boaz Shuster"
Email = "ripcurld.github@gmail.com"
GitHub = "ripcurld"
[people.rumpl]
Name = "Djordje Lukic"
Email = "djordje.lukic@docker.com"
GitHub = "rumpl"
[people.rheinwein]
Name = "Laura Frank"
Email = "laura@codeship.com"
GitHub = "rheinwein"
[people.runcom]
Name = "Antonio Murdaca"
Email = "runcom@redhat.com"
GitHub = "runcom"
[people.samuelkarp]
Name = "Samuel Karp"
Email = "me@samuelkarp.com"
GitHub = "samuelkarp"
[people.samwhited]
Name = "Sam Whited"
Email = "sam@samwhited.com"
GitHub = "samwhited"
[people.shykes]
Name = "Solomon Hykes"
Email = "solomon@docker.com"
@@ -541,3 +446,4 @@
Name = "Yong Tang"
Email = "yong.tang.github@outlook.com"
GitHub = "yongtang"

Makefile (224 lines changed)

@@ -1,121 +1,83 @@
.PHONY: all binary dynbinary build cross help install manpages run shell test test-docker-py test-integration test-unit validate validate-% win
DOCKER ?= docker
BUILDX ?= $(DOCKER) buildx
.PHONY: all binary build cross deb help init-go-pkg-cache install manpages rpm run shell test test-docker-py test-integration-cli test-unit tgz validate win
# set the graph driver as the current graphdriver if not set
DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //'))
export DOCKER_GRAPHDRIVER
DOCKER_INCREMENTAL_BINARY := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_INCREMENTAL_BINARY),1)
export DOCKER_INCREMENTAL_BINARY
# get OS/Arch of docker engine
DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH}')
DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH:-$$DOCKER_CLIENT_OSARCH}')
DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}')
DOCKER_GITCOMMIT := $(shell git rev-parse --short HEAD || echo unsupported)
export DOCKER_GITCOMMIT
# allow overriding the repository and branch that validation scripts are running
# against. These are used in hack/validate/.validate to check what changed in the PR.
export VALIDATE_REPO
export VALIDATE_BRANCH
export VALIDATE_ORIGIN_BRANCH
# env vars passed through directly to Docker's build scripts
# to allow things like `make KEEPBUNDLE=1 binary` easily
# `project/PACKAGERS.md` has some limited documentation of some of these
#
# DOCKER_LDFLAGS can be used to pass additional parameters to -ldflags
# option of "go build". For example, a built-in graphdriver priority list
# can be changed during build time like this:
#
# make DOCKER_LDFLAGS="-X github.com/docker/docker/daemon/graphdriver.priority=overlay2,devicemapper" dynbinary
#
DOCKER_ENVS := \
-e DOCKER_CROSSPLATFORMS \
-e BUILD_APT_MIRROR \
-e BUILDFLAGS \
-e KEEPBUNDLE \
-e DOCKER_BUILD_ARGS \
-e DOCKER_BUILD_GOGC \
-e DOCKER_BUILD_OPTS \
-e DOCKER_BUILD_PKGS \
-e DOCKER_BUILDKIT \
-e DOCKER_BASH_COMPLETION_PATH \
-e DOCKER_CLI_PATH \
-e DOCKER_CROSSPLATFORMS \
-e DOCKER_DEBUG \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_LDFLAGS \
-e DOCKER_INCREMENTAL_BINARY \
-e DOCKER_PORT \
-e DOCKER_REMAP_ROOT \
-e DOCKER_ROOTLESS \
-e DOCKER_STORAGE_OPTS \
-e DOCKER_TEST_HOST \
-e DOCKER_USERLANDPROXY \
-e DOCKERD_ARGS \
-e DELVE_PORT \
-e GITHUB_ACTIONS \
-e TEST_FORCE_VALIDATE \
-e TEST_INTEGRATION_DIR \
-e TEST_SKIP_INTEGRATION \
-e TEST_SKIP_INTEGRATION_CLI \
-e TESTCOVERAGE \
-e TESTDEBUG \
-e TESTDIRS \
-e TESTFLAGS \
-e TESTFLAGS_INTEGRATION \
-e TESTFLAGS_INTEGRATION_CLI \
-e TEST_FILTER \
-e TIMEOUT \
-e VALIDATE_REPO \
-e VALIDATE_BRANCH \
-e VALIDATE_ORIGIN_BRANCH \
-e VERSION \
-e PLATFORM \
-e DEFAULT_PRODUCT_LICENSE \
-e PRODUCT \
-e PACKAGER_NAME
-e HTTP_PROXY \
-e HTTPS_PROXY \
-e NO_PROXY \
-e http_proxy \
-e https_proxy \
-e no_proxy
# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test`
# (default to no bind mount if DOCKER_HOST is set)
# note: BINDDIR is supported for backwards-compatibility here
BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles))
# DOCKER_MOUNT can be overridden, but use at your own risk!
ifndef DOCKER_MOUNT
DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)")
DOCKER_MOUNT := $(if $(DOCKER_BINDDIR_MOUNT_OPTS),$(DOCKER_MOUNT):$(DOCKER_BINDDIR_MOUNT_OPTS),$(DOCKER_MOUNT))
# This allows the test suite to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs.
# The volume will be cleaned up when the container is removed due to `--rm`.
# Note that `BIND_DIR` will already be set to `bundles` if `DOCKER_HOST` is not set (see above BIND_DIR line), in such case this will do nothing since `DOCKER_MOUNT` will already be set.
DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docker/docker/bundles) -v "$(CURDIR)/.git:/go/src/github.com/docker/docker/.git"
DOCKER_MOUNT_CACHE := -v docker-dev-cache:/root/.cache -v docker-mod-cache:/go/pkg/mod/
DOCKER_MOUNT_CLI := $(if $(DOCKER_CLI_PATH),-v $(shell dirname $(DOCKER_CLI_PATH)):/usr/local/cli,)
DOCKER_MOUNT_BASH_COMPLETION := $(if $(DOCKER_BASH_COMPLETION_PATH),-v $(shell dirname $(DOCKER_BASH_COMPLETION_PATH)):/usr/local/completion/bash,)
DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_CACHE) $(DOCKER_MOUNT_CLI) $(DOCKER_MOUNT_BASH_COMPLETION)
endif # ifndef DOCKER_MOUNT
DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docker/docker/bundles) -v $(CURDIR)/.git:/go/src/github.com/docker/docker/.git
# This allows setting the docker-dev container name
DOCKER_CONTAINER_NAME := $(if $(CONTAINER_NAME),--name $(CONTAINER_NAME),)
DOCKER_IMAGE := docker-dev
DOCKER_PORT_FORWARD := $(if $(DOCKER_PORT),-p "$(DOCKER_PORT)",)
DELVE_PORT_FORWARD := $(if $(DELVE_PORT),-p "$(DELVE_PORT)",)
# enable package cache if DOCKER_INCREMENTAL_BINARY and DOCKER_MOUNT (i.e. DOCKER_HOST) are set
PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64:/usr/local/go/pkg/linux_amd64 goroot-linux_amd64_netgo:/usr/local/go/pkg/linux_amd64_netgo
PKGCACHE_VOLROOT := dockerdev-go-pkg-cache
PKGCACHE_VOL := $(if $(PKGCACHE_DIR),$(CURDIR)/$(PKGCACHE_DIR)/,$(PKGCACHE_VOLROOT)-)
DOCKER_MOUNT_PKGCACHE := $(if $(DOCKER_INCREMENTAL_BINARY),$(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(PKGCACHE_VOL)\1"@g'),)
DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_PKGCACHE)
DOCKER_FLAGS := $(DOCKER) run --rm --privileged $(DOCKER_CONTAINER_NAME) $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD) $(DELVE_PORT_FORWARD)
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
DOCKER_PORT_FORWARD := $(if $(DOCKER_PORT),-p "$(DOCKER_PORT)",)
DOCKER_FLAGS := docker run --rm -i --privileged $(DOCKER_CONTAINER_NAME) $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD)
BUILD_APT_MIRROR := $(if $(DOCKER_BUILD_APT_MIRROR),--build-arg APT_MIRROR=$(DOCKER_BUILD_APT_MIRROR))
export BUILD_APT_MIRROR
SWAGGER_DOCS_PORT ?= 9000
define \n
endef
INTEGRATION_CLI_MASTER_IMAGE := $(if $(INTEGRATION_CLI_MASTER_IMAGE), $(INTEGRATION_CLI_MASTER_IMAGE), integration-cli-master)
INTEGRATION_CLI_WORKER_IMAGE := $(if $(INTEGRATION_CLI_WORKER_IMAGE), $(INTEGRATION_CLI_WORKER_IMAGE), integration-cli-worker)
# if this session isn't interactive, then we don't want to allocate a
# TTY, which would fail, but if it is interactive, we do want to attach
@@ -125,111 +87,83 @@ ifeq ($(INTERACTIVE), 1)
DOCKER_FLAGS += -t
endif
# on GitHub Runners the input device is not a TTY, so we allocate a pseudo-one;
# otherwise, keep STDIN open even if not attached when not on a GitHub Runner.
ifeq ($(GITHUB_ACTIONS),true)
DOCKER_FLAGS += -t
else
DOCKER_FLAGS += -i
endif
DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)"
DOCKER_BUILD_ARGS += --build-arg=GO_VERSION
ifdef DOCKER_SYSTEMD
DOCKER_BUILD_ARGS += --build-arg=SYSTEMD=true
endif
BUILD_OPTS := ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} ${DOCKER_BUILD_OPTS} -f "$(DOCKERFILE)"
BUILD_CMD := $(BUILDX) build
# This is used for the legacy "build" target and anything still depending on it
BUILD_CROSS =
ifdef DOCKER_CROSS
BUILD_CROSS = --build-arg CROSS=$(DOCKER_CROSS)
endif
ifdef DOCKER_CROSSPLATFORMS
BUILD_CROSS = --build-arg CROSS=true
endif
VERSION_AUTOGEN_ARGS = --build-arg VERSION --build-arg DOCKER_GITCOMMIT --build-arg PRODUCT --build-arg PLATFORM --build-arg DEFAULT_PRODUCT_LICENSE --build-arg PACKAGER_NAME
default: binary
all: build ## validate all checks, build linux binaries, run all tests,\ncross build non-linux binaries, and generate archives
all: build ## validate all checks, build linux binaries, run all tests\ncross build non-linux binaries and generate archives
$(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh'
binary: bundles ## build statically linked linux binaries
$(BUILD_CMD) $(BUILD_OPTS) --output=bundles/ --target=$@ $(VERSION_AUTOGEN_ARGS) .
binary: build ## build the linux binaries
$(DOCKER_RUN_DOCKER) hack/make.sh binary
dynbinary: bundles ## build dynamically linked linux binaries
$(BUILD_CMD) $(BUILD_OPTS) --output=bundles/ --target=$@ $(VERSION_AUTOGEN_ARGS) .
cross: BUILD_OPTS += --build-arg CROSS=true --build-arg DOCKER_CROSSPLATFORMS
cross: bundles ## cross build the binaries for darwin, freebsd and\nwindows
$(BUILD_CMD) $(BUILD_OPTS) --output=bundles/ --target=$@ $(VERSION_AUTOGEN_ARGS) .
build: bundles init-go-pkg-cache
docker build ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" .
bundles:
mkdir bundles
.PHONY: clean
clean: clean-cache
clean: clean-pkg-cache-vol ## clean up cached resources
clean-pkg-cache-vol:
@- $(foreach mapping,$(PKGCACHE_MAP), \
$(shell docker volume rm $(PKGCACHE_VOLROOT)-$(shell echo $(mapping) | awk -F':/' '{ print $$1 }') > /dev/null 2>&1) \
)
cross: build ## cross build the binaries for darwin, freebsd and\nwindows
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross
deb: build ## build the deb packages
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-deb
.PHONY: clean-cache
clean-cache: ## remove the docker volumes that are used for caching in the dev-container
docker volume rm -f docker-dev-cache docker-mod-cache
help: ## this help
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z0-9_-]+:.*?## / {gsub("\\\\n",sprintf("\n%22c",""), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
init-go-pkg-cache:
$(if $(PKGCACHE_DIR), mkdir -p $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^: ]*):[^ ]*@$(PKGCACHE_DIR)/\1@g'))
install: ## install the linux binaries
KEEPBUNDLE=1 hack/make.sh install-binary
manpages: ## Generate man pages from go source and markdown
docker build ${DOCKER_BUILD_ARGS} -t docker-manpage-dev -f "man/$(DOCKERFILE)" ./man
docker run --rm \
-v $(PWD):/go/src/github.com/docker/docker/ \
docker-manpage-dev
rpm: build ## build the rpm packages
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-rpm
run: build ## run the docker daemon in a container
$(DOCKER_RUN_DOCKER) sh -c "KEEPBUNDLE=1 hack/make.sh install-binary run"
.PHONY: build
ifeq ($(BIND_DIR), .)
build: shell_target := --target=dev
else
build: shell_target := --target=final
endif
build: bundles
$(BUILD_CMD) $(BUILD_OPTS) $(shell_target) --load $(BUILD_CROSS) -t "$(DOCKER_IMAGE)" .
shell: build ## start a shell inside the build env
$(DOCKER_RUN_DOCKER) bash
test: build test-unit ## run the unit, integration and docker-py tests
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary cross test-integration test-docker-py
yaml-docs-gen: build ## generate documentation YAML files consumed by docs repo
$(DOCKER_RUN_DOCKER) sh -c 'hack/make.sh yaml-docs-generator && ( cd bundles/latest/yaml-docs-generator; mkdir docs; ./yaml-docs-generator --target $$(pwd)/docs )'
test: build ## run the unit, integration and docker-py tests
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary cross test-unit test-integration-cli test-docker-py
test-docker-py: build ## run the docker-py tests
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-docker-py
test-integration-cli: test-integration ## (DEPRECATED) use test-integration
ifneq ($(and $(TEST_SKIP_INTEGRATION),$(TEST_SKIP_INTEGRATION_CLI)),)
test-integration:
@echo Both integration suites skipped per environment variables
else
test-integration: build ## run the integration tests
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration
endif
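Because both suites honour their skip variables, either half can be disabled straight from the environment, mirroring what the Jenkins stages above do:

```bash
# Run only the newer integration suite, skipping the legacy CLI suite
TEST_SKIP_INTEGRATION_CLI=1 make test-integration
```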
test-integration-flaky: build ## run the stress test for all new integration tests
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration-flaky
test-integration-cli: build ## run the integration tests
$(DOCKER_RUN_DOCKER) hack/make.sh build-integration-test-binary dynbinary test-integration-cli
test-unit: build ## run the unit tests
$(DOCKER_RUN_DOCKER) hack/test/unit
$(DOCKER_RUN_DOCKER) hack/make.sh test-unit
tgz: build ## build the archives (.zip on windows and .tgz\notherwise) containing the binaries
$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross tgz
validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor
$(DOCKER_RUN_DOCKER) hack/validate/all
validate-%: build ## validate specific check
$(DOCKER_RUN_DOCKER) hack/validate/$*
win: build ## cross build the binary for windows
$(DOCKER_RUN_DOCKER) DOCKER_CROSSPLATFORMS=windows/amd64 hack/make.sh cross
$(DOCKER_RUN_DOCKER) hack/make.sh win
.PHONY: swagger-gen
swagger-gen:
@@ -245,4 +179,20 @@ swagger-docs: ## preview the API documentation
@docker run --rm -v $(PWD)/api/swagger.yaml:/usr/share/nginx/html/swagger.yaml \
-e 'REDOC_OPTIONS=hide-hostname="true" lazy-rendering' \
-p $(SWAGGER_DOCS_PORT):80 \
bfirsh/redoc:1.14.0
bfirsh/redoc:1.6.2
build-integration-cli-on-swarm: build ## build images and binary for running integration-cli on Swarm in parallel
@echo "Building hack/integration-cli-on-swarm"
go build -o ./hack/integration-cli-on-swarm/integration-cli-on-swarm ./hack/integration-cli-on-swarm/host
@echo "Building $(INTEGRATION_CLI_MASTER_IMAGE)"
docker build -t $(INTEGRATION_CLI_MASTER_IMAGE) hack/integration-cli-on-swarm/agent
# For worker, we don't use `docker build` so as to enable DOCKER_INCREMENTAL_BINARY and so on
@echo "Building $(INTEGRATION_CLI_WORKER_IMAGE) from $(DOCKER_IMAGE)"
$(eval tmp := integration-cli-worker-tmp)
# We mount pkgcache, but not bundle (bundle needs to be baked into the image)
# To avoid baking DOCKER_GRAPHDRIVER and so on into the image, we cannot use $(DOCKER_ENVS) here
docker run -t -d --name $(tmp) -e DOCKER_GITCOMMIT -e BUILDFLAGS -e DOCKER_INCREMENTAL_BINARY --privileged $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_IMAGE) top
docker exec $(tmp) hack/make.sh build-integration-test-binary dynbinary
docker exec $(tmp) go build -o /worker github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker
docker commit -c 'ENTRYPOINT ["/worker"]' $(tmp) $(INTEGRATION_CLI_WORKER_IMAGE)
docker rm -f $(tmp)
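Invocation is just the target itself; the host binary and the master and worker images are all produced as side effects:

```bash
# Build the host binary plus the master and worker images in one go
make build-integration-cli-on-swarm
```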

NOTICE (2 lines changed)

@@ -3,7 +3,7 @@ Copyright 2012-2017 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).
This product contains software (https://github.com/creack/pty) developed
This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.
The following is courtesy of our legal counsel:

README.md (309 lines changed)

@@ -1,48 +1,270 @@
The Moby Project
================
Docker: the container engine [![Release](https://img.shields.io/github/release/docker/docker.svg)](https://github.com/docker/docker/releases/latest)
============================
![Moby Project logo](docs/static_files/moby-project-logo.png "The Moby Project")
Docker is an open source project to pack, ship and run any application
as a lightweight container.
Moby is an open-source project created by Docker to enable and accelerate software containerization.
Docker containers are both *hardware-agnostic* and *platform-agnostic*.
This means they can run anywhere, from your laptop to the largest
cloud compute instance and everything in between - and they don't require
you to use a particular language, framework or packaging system. That
makes them great building blocks for deploying and scaling web apps,
databases, and backend services without depending on a particular stack
or provider.
It provides a "Lego set" of toolkit components, the framework for assembling them into custom container-based systems, and a place for all container enthusiasts and professionals to experiment and exchange ideas.
Components include container build tools, a container registry, orchestration tools, a runtime and more, and these can be used as building blocks in conjunction with other tools and projects.
Docker began as an open-source implementation of the deployment engine which
powered [dotCloud](http://web.archive.org/web/20130530031104/https://www.dotcloud.com/),
a popular Platform-as-a-Service. It benefits directly from the experience
accumulated over several years of large-scale operation and support of hundreds
of thousands of applications and databases.
## Principles
![Docker logo](docs/static_files/docker-logo-compressed.png "Docker")
Moby is an open project guided by strong principles, aiming to be modular, flexible and without too strong an opinion on user experience.
It is open to the community to help set its direction.
## Security Disclosure
- Modular: the project includes lots of components that have well-defined functions and APIs that work together.
- Batteries included but swappable: Moby includes enough components to build a fully featured container system, but its modular architecture ensures that most of the components can be swapped out for different implementations.
- Usable security: Moby provides secure defaults without compromising usability.
- Developer focused: The APIs are intended to be functional and useful to build powerful tools.
They are not necessarily intended as end user tools but as components aimed at developers.
Documentation and UX are aimed at developers, not end users.
Security is very important to us. If you have any issue regarding security,
please disclose the information responsibly by sending an email to
security@docker.com and not by creating a GitHub issue.
## Audience
## Better than VMs
The Moby Project is intended for engineers, integrators and enthusiasts looking to modify, hack, fix, experiment, invent and build systems based on containers.
It is not for people looking for a commercially supported system, but for people who want to work and learn with open source code.
A common method for distributing applications and sandboxing their
execution is to use virtual machines, or VMs. Typical VM formats are
VMware's vmdk, Oracle VirtualBox's vdi, and Amazon EC2's ami. In theory
these formats should allow every developer to automatically package
their application into a "machine" for easy distribution and deployment.
In practice, that almost never happens, for a few reasons:
## Relationship with Docker
* *Size*: VMs are very large which makes them impractical to store
and transfer.
* *Performance*: running VMs consumes significant CPU and memory,
which makes them impractical in many scenarios, for example local
development of multi-tier applications, and large-scale deployment
of CPU- and memory-intensive applications on large numbers of
machines.
* *Portability*: competing VM environments don't play well with each
other. Although conversion tools do exist, they are limited and
add even more overhead.
* *Hardware-centric*: VMs were designed with machine operators in
mind, not software developers. As a result, they offer very
limited tooling for what developers need most: building, testing
and running their software. For example, VMs offer no facilities
for application versioning, monitoring, configuration, logging or
service discovery.
The components and tools in the Moby Project are initially the open source components that Docker and the community have built for the Docker Project.
New projects can be added if they fit with the community goals. Docker is committed to using Moby as the upstream for the Docker Product.
However, other projects are also encouraged to use Moby as an upstream and to reuse its components in diverse ways; all such uses will be treated in the same way. External maintainers and contributors are welcome.
By contrast, Docker relies on a different sandboxing method known as
*containerization*. Unlike traditional virtualization, containerization
takes place at the kernel level. Most modern operating system kernels
now support the primitives necessary for containerization, including
Linux with [openvz](https://openvz.org),
[vserver](http://linux-vserver.org) and more recently
[lxc](https://linuxcontainers.org/), Solaris with
[zones](https://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc),
and FreeBSD with
[Jails](https://www.freebsd.org/doc/handbook/jails.html).
The Moby project is not intended as a location for support or feature requests for Docker products, but as a place for contributors to work on open source code, fix bugs, and make the code more useful.
The releases are supported by the maintainers, community and users, on a best-efforts basis only, and are not intended for customers who want enterprise or commercial support; Docker EE is the appropriate product for these use cases.
Docker builds on top of these low-level primitives to offer developers a
portable format and runtime environment that solves all four problems.
Docker containers are small (and their transfer can be optimized with
layers), they have basically zero memory and CPU overhead, they are
completely portable, and are designed from the ground up with an
application-centric design.
-----
Perhaps best of all, because Docker operates at the OS level, it can still be
run inside a VM!
Legal
=====
## Plays well with others
Docker does not require you to buy into a particular programming
language, framework, packaging system, or configuration language.
Is your application a Unix process? Does it use files, TCP connections,
environment variables, standard Unix streams and command-line arguments
as inputs and outputs? Then Docker can run it.
Can your application's build be expressed as a sequence of such
commands? Then Docker can build it.
## Escape dependency hell
A common problem for developers is the difficulty of managing all
their application's dependencies in a simple and automated way.
This is usually difficult for several reasons:
* *Cross-platform dependencies*. Modern applications often depend on
a combination of system libraries and binaries, language-specific
packages, framework-specific modules, internal components
developed for another project, etc. These dependencies live in
different "worlds" and require different tools - these tools
typically don't work well with each other, requiring awkward
custom integrations.
* *Conflicting dependencies*. Different applications may depend on
different versions of the same dependency. Packaging tools handle
these situations with various degrees of ease - but they all
handle them in different and incompatible ways, which again forces
the developer to do extra work.
* *Custom dependencies*. A developer may need to prepare a custom
version of their application's dependency. Some packaging systems
can handle custom versions of a dependency, others can't - and all
of them handle it differently.
Docker solves the problem of dependency hell by giving the developer a simple
way to express *all* their application's dependencies in one place, while
streamlining the process of assembling them. If this makes you think of
[XKCD 927](https://xkcd.com/927/), don't worry. Docker doesn't
*replace* your favorite packaging systems. It simply orchestrates
their use in a simple and repeatable way. How does it do that? With
layers.
Docker defines a build as running a sequence of Unix commands, one
after the other, in the same container. Build commands modify the
contents of the container (usually by installing new files on the
filesystem), the next command modifies it some more, etc. Since each
build command inherits the result of the previous commands, the
*order* in which the commands are executed expresses *dependencies*.
Here's a typical Docker build process:
```dockerfile
FROM ubuntu:12.04
RUN apt-get update && apt-get install -y python python-pip curl
RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv
RUN cd helloflask-master && pip install -r requirements.txt
```
Note that Docker doesn't care *how* dependencies are built - as long
as they can be built by running a Unix command in a container.
Getting started
===============
Docker can be installed either on your computer for building applications or
on servers for running them. To get started, [check out the installation
instructions in the
documentation](https://docs.docker.com/engine/installation/).
Usage examples
==============
Docker can be used to run short-lived commands, long-running daemons
(app servers, databases, etc.), interactive shell sessions, etc.
You can find a [list of real-world
examples](https://docs.docker.com/engine/examples/) in the
documentation.
Under the hood
--------------
Under the hood, Docker is built on the following components:
* The
[cgroups](https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt)
and
[namespaces](http://man7.org/linux/man-pages/man7/namespaces.7.html)
capabilities of the Linux kernel
* The [Go](https://golang.org) programming language
* The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md)
* The [Libcontainer Specification](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md)
Contributing to Docker [![GoDoc](https://godoc.org/github.com/docker/docker?status.svg)](https://godoc.org/github.com/docker/docker)
======================
| **Master** (Linux) | **Experimental** (Linux) | **Windows** | **FreeBSD** |
|------------------|----------------------|---------|---------|
| [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/) | [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/) |
Want to hack on Docker? Awesome! We have [instructions to help you get
started contributing code or documentation](https://docs.docker.com/opensource/project/who-written-for/).
These instructions are probably not perfect; please let us know if anything
feels wrong or incomplete. Better yet, submit a PR and improve them yourself.
Getting the development builds
==============================
Want to run Docker from a master build? You can download
master builds at [master.dockerproject.org](https://master.dockerproject.org).
They are updated with each commit merged into the master branch.
Don't know how to use that super cool new feature in the master build? Check
out the master docs at
[docs.master.dockerproject.org](http://docs.master.dockerproject.org).
How the project is run
======================
Docker is a very, very active project. If you want to learn more about how it is run,
or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/master/project).
We are always open to suggestions on process improvements, and are always looking for more maintainers.
### Talking to other Docker users and contributors
<table class="tg">
<col width="45%">
<col width="65%">
<tr>
<td>Internet&nbsp;Relay&nbsp;Chat&nbsp;(IRC)</td>
<td>
<p>
IRC is a direct line to our most knowledgeable Docker users; we have
both the <code>#docker</code> and <code>#docker-dev</code> channels on
<strong>irc.freenode.net</strong>.
IRC is a rich chat protocol but it can overwhelm new users. You can search
<a href="https://botbot.me/freenode/docker/#" target="_blank">our chat archives</a>.
</p>
Read our <a href="https://docs.docker.com/opensource/get-help/#/irc-quickstart" target="_blank">IRC quickstart guide</a> for an easy way to get started.
</td>
</tr>
<tr>
<td>Docker Community Forums</td>
<td>
The <a href="https://forums.docker.com/c/open-source-projects/de" target="_blank">Docker Engine</a>
group is for users of the Docker Engine project.
</td>
</tr>
<tr>
<td>Google Groups</td>
<td>
The <a href="https://groups.google.com/forum/#!forum/docker-dev"
target="_blank">docker-dev</a> group is for contributors and other people
interested in contributing to the Docker project. You can join this group without a
Google account by sending an email to <a
href="mailto:docker-dev+subscribe@googlegroups.com">docker-dev+subscribe@googlegroups.com</a>.
You'll receive a join-request message; simply reply to the message to
confirm your subscription.
</td>
</tr>
<tr>
<td>Twitter</td>
<td>
You can follow <a href="https://twitter.com/docker/" target="_blank">Docker's Twitter feed</a>
to get updates on our products. You can also tweet us questions or just
share blogs or stories.
</td>
</tr>
<tr>
<td>Stack Overflow</td>
<td>
Stack Overflow has over 7000 Docker questions listed. We regularly
monitor <a href="https://stackoverflow.com/search?tab=newest&q=docker" target="_blank">Docker questions</a>
and so do many other knowledgeable Docker users.
</td>
</tr>
</table>
### Legal
*Brought to you courtesy of our legal counsel. For more context,
please see the [NOTICE](https://github.com/docker/docker/blob/master/NOTICE) document in this repo.*
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see https://www.bis.doc.gov
Licensing
=========
Docker is licensed under the Apache License, Version 2.0. See
[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full
license text.
Other Docker Related Projects
=============================
There are a number of projects under development that are based on Docker's
core technology. These projects expand the tooling built around the
Docker platform to broaden its application and utility.
* [Docker Registry](https://github.com/docker/distribution): Registry
server for Docker (hosting/delivery of repositories and images)
* [Docker Machine](https://github.com/docker/machine): Machine management
for a container-centric world
* [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering
system
* [Docker Compose](https://github.com/docker/compose) (formerly Fig):
Define and run multi-container apps
* [Kitematic](https://github.com/docker/kitematic): The easiest way to use
Docker on Mac and Windows
If you know of another project underway that should be listed here, please help
us keep this list up-to-date by submitting a PR.
Awesome-Docker
==============
You can find more projects, tools and articles related to Docker on the [awesome-docker list](https://github.com/veggiemonk/awesome-docker). Add your project there.
Moby Project Roadmap
====================
### How should I use this document?
This document provides a description of the items that the project has decided to prioritize. This should
serve as a reference point for Moby contributors to understand where the project is going, and
help determine if a contribution could be conflicting with some longer term plans.
The fact that a feature isn't listed here doesn't mean that a patch for it will automatically be
refused! We are always happy to receive patches for new cool features we haven't thought about,
or didn't judge to be a priority. Please however understand that such patches might take longer
for us to review.
### How can I help?
Short term objectives are listed in
[Issues](https://github.com/moby/moby/issues?q=is%3Aopen+is%3Aissue+label%3Aroadmap). Our
goal is to split up the workload in such a way that anybody can jump in and help. Please comment on
issues if you want to work on them to avoid duplicating effort! Similarly, if a maintainer is already
assigned to an issue you'd like to participate in, pinging them on GitHub to offer your help is
the best way to go.
### How can I add something to the roadmap?
The roadmap process is new to the Moby Project: we are only beginning to structure and document the
project objectives. Our immediate goal is to be more transparent, and work with our community to
focus our efforts on fewer prioritized topics.
We hope to offer in the near future a process allowing anyone to propose a topic to the roadmap, but
we are not quite there yet. For the time being, it is best to discuss with the maintainers on an
issue, in the Slack channel, or in person at the Moby Summits that happen every few months.
# 1. Features and refactoring
## 1.1 Runtime improvements
Over time we have accumulated a lot of functionality in the container runtime
aspect of Moby while also growing in other areas. Many of the container runtime
pieces are now duplicated work, available in other, lower-level components such
as [containerd](https://containerd.io).
Moby currently only utilizes containerd for basic runtime state management, e.g. starting
and stopping a container, which is what the pre-containerd 1.0 daemon provided.
Now that containerd is a full-fledged container runtime which supports full
container life-cycle management, we would like to start relying more on containerd
and removing the bits in Moby which are now duplicated. This will necessitate
a significant effort to refactor and even remove large parts of Moby's codebase.
Tracking issues:
- [#38043](https://github.com/moby/moby/issues/38043) Proposal: containerd image integration
## 1.2 Image Builder
Work is ongoing to integrate [BuildKit](https://github.com/moby/buildkit) into
Moby and replace the "v0" build implementation. BuildKit offers better cache
management, parallelizable build steps, and better extensibility while also
keeping builds portable, a chief tenet of Moby's builder.
Upon completion of this effort, users will have a builder that performs better
while also being more extensible, enabling users to provide their own custom
syntax which can be either Dockerfile-like or something completely different.
See [buildpacks on buildkit](https://github.com/tonistiigi/buildkit-pack) as an
example of this extensibility.
New features for the builder and Dockerfile should be implemented first in the
BuildKit backend using an external Dockerfile implementation from the container
images. This allows everyone to test and evaluate the feature without upgrading
their daemon. New features should go to the experimental channel first, and can be
part of the `docker/dockerfile:experimental` image. From there they graduate to
`docker/dockerfile:latest` and binary releases. The Dockerfile frontend source
code is temporarily located at
[https://github.com/moby/buildkit/tree/master/frontend/dockerfile](https://github.com/moby/buildkit/tree/master/frontend/dockerfile)
with separate new features defined with go build tags.
Tracking issues:
- [#32925](https://github.com/moby/moby/issues/32925) discussion: builder future: buildkit
## 1.3 Rootless Mode
Running the daemon requires elevated privileges for many tasks. We would like to
support running the daemon as a normal, unprivileged user without requiring `suid`
binaries.
Tracking issues:
- [#37375](https://github.com/moby/moby/issues/37375) Proposal: allow running `dockerd` as an unprivileged user (aka rootless mode)
## 1.4 Testing
Moby has many tests, both unit and integration. Moby needs more tests which can
cover the full spectrum of functionality and edge cases out there.
Tests in the `integration-cli` folder should also be migrated, both in
location and style, into the `integration` folder. These newer tests are simpler to
run in isolation, simpler to read, simpler to write, and more fully exercise the
API. Meanwhile, tests of the docker CLI should generally live in docker/cli.
Tracking issues:
- [#32866](https://github.com/moby/moby/issues/32866) Replace integration-cli suite with API test suite
## 1.5 Internal decoupling
A lot of work has been done in trying to decouple Moby internals. This process of creating
standalone projects with a well-defined function that attract a dedicated community should continue.
As well as integrating `containerd` we would like to integrate [BuildKit](https://github.com/moby/buildkit)
as the next standalone component.
We see gRPC as the natural communication layer between decoupled components.
In addition to pushing out large components into other projects, much of the
internal code structure, and in particular the
["Daemon"](https://godoc.org/github.com/docker/docker/daemon#Daemon) object,
should be split into smaller, more manageable, and more testable components.
# Reporting security issues
The Moby maintainers take security seriously. If you discover a security issue, please bring it to their attention right away!
### Reporting a Vulnerability
Please **DO NOT** file a public issue, instead send your report privately to security@docker.com.
Security reports are greatly appreciated and we will publicly thank you for them, although we will keep your name confidential if you request it. We also like to send gifts; if you're into schwag, make sure to let us know. We currently do not offer a paid security bounty program, but we are not ruling it out in the future.
# Testing
This document contains the Moby code testing guidelines. It should answer any
questions you may have as an aspiring Moby contributor.
## Test suites
Moby has two test suites (and one legacy test suite):
* Unit tests - use standard `go test` and
  [gotest.tools/assert](https://godoc.org/gotest.tools/assert) assertions (see
  the sketch after this list). They are located in the package they test. Unit
  tests should be fast and test only their own package.
* API integration tests - use standard `go test` and
[gotest.tools/assert](https://godoc.org/gotest.tools/assert) assertions. They are located in
`./integration/<component>` directories, where `component` is: container,
image, volume, etc. These tests perform HTTP requests to an API endpoint and
check the HTTP response and daemon state after the call.
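For instance, a minimal unit test in that style, exercising the `MatchesContentType` helper that appears in `api/common.go` later in this diff, might look like the following sketch:

```go
package api

import (
	"testing"

	"gotest.tools/assert"
)

// TestMatchesContentType lives in the package it tests, runs fast, and
// touches no daemon state -- the defining traits of a Moby unit test.
func TestMatchesContentType(t *testing.T) {
	assert.Assert(t, MatchesContentType("application/json; charset=utf-8", "application/json"))
	assert.Assert(t, !MatchesContentType("text/plain", "application/json"))
}
```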
The legacy test suite `integration-cli/` is deprecated. No new tests will be
added to this suite. Any tests in this suite which require updates should be
ported to either the unit test suite or the new API integration test suite.
## Writing new tests
Most code changes will fall into one of the following categories.
### Writing tests for new features
New code should be covered by unit tests. If the code is difficult to test with
unit tests, then that is a good sign that it should be refactored to make it
easier to reuse and maintain. Consider accepting unexported interfaces instead
of structs so that fakes can be provided for dependencies.
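As a sketch of that advice (the names here are illustrative, not an actual Moby API), code that needs image lookups can accept a narrow unexported interface, which a test can then satisfy with a trivial fake:

```go
package image

// resolver is the narrow, unexported interface the code under test needs.
// Accepting it, rather than a concrete store struct, lets tests plug in fakes.
type resolver interface {
	Resolve(name string) (id string, err error)
}

// Tag looks up an image ID through whatever resolver it is given.
func Tag(r resolver, name string) (string, error) {
	return r.Resolve(name)
}

// fakeResolver is a test double satisfying resolver; no daemon required.
type fakeResolver struct{ id string }

func (f fakeResolver) Resolve(string) (string, error) { return f.id, nil }
```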
If the new feature includes a completely new API endpoint then a new API
integration test should be added to cover the success case of that endpoint.
If the new feature does not include a completely new API endpoint consider
adding the new API fields to the existing test for that endpoint. A new
integration test should **not** be added for every new API field or API error
case. Error cases should be handled by unit tests.
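As an illustration, an API integration test along these lines can drive the daemon through the public Go client and assert on the response; this sketch uses `client.NewEnvClient` from `client/` rather than any internal test harness:

```go
package container

import (
	"context"
	"testing"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"gotest.tools/assert"
)

// TestContainerList hits a real daemon (configured via DOCKER_HOST et al.)
// and checks only the success path of the endpoint.
func TestContainerList(t *testing.T) {
	cli, err := client.NewEnvClient()
	assert.NilError(t, err)
	defer cli.Close()

	_, err = cli.ContainerList(context.Background(), types.ContainerListOptions{})
	assert.NilError(t, err)
}
```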
### Writing tests for bug fixes
Bug fixes should include a unit test case which exercises the bug.
A bug fix may also include new assertions in existing integration tests for the
API endpoint.
### Writing new integration tests
Note the `integration-cli` tests are deprecated; new tests will be rejected by
the CI.
Instead, implement new tests under `integration/`.
### Integration tests environment considerations
When adding new tests or modifying existing tests under `integration/`, the
testing environment should be properly considered. `skip.If` from
[gotest.tools/skip](https://godoc.org/gotest.tools/skip) can be used to make the
test run conditionally. The full set of testing-environment conditions can be found in
[environment.go](https://github.com/moby/moby/blob/6b6eeed03b963a27085ea670f40cd5ff8a61f32e/testutil/environment/environment.go).
Here is a quick example. If the test needs to interact with a docker daemon on
the same host, the following condition should be checked within the test code
```go
skip.If(t, testEnv.IsRemoteDaemon())
// your integration test code
```
If a remote daemon is detected, the test will be skipped.
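Other environment conditions compose the same way; for example, to restrict a test to a Linux daemon (a sketch assuming the `DaemonInfo` field exposed by the same `testEnv` object, and using the optional skip message that `skip.If` accepts):

```go
skip.If(t, testEnv.DaemonInfo.OSType != "linux", "test requires a Linux daemon")
// your integration test code
```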
## Running tests
### Unit Tests
To run the unit test suite:
```
make test-unit
```
or `hack/test/unit` from inside a `BINDDIR=. make shell` container or properly
configured environment.
The following environment variables may be used to run a subset of tests:
* `TESTDIRS` - paths to directories to be tested, defaults to `./...`
* `TESTFLAGS` - flags passed to `go test`, to run tests which match a pattern
use `TESTFLAGS="-test.run TestNameOrPrefix"`
### Integration Tests
To run the integration test suite:
```
make test-integration
```
This make target runs both the "integration" suite and the "integration-cli"
suite.
You can specify which integration test directories to build and run by setting
the list of directories in the `TEST_INTEGRATION_DIR` environment variable.

You can also explicitly skip either suite by setting any value in the
`TEST_SKIP_INTEGRATION` and/or `TEST_SKIP_INTEGRATION_CLI` environment variables.

Flags specific to each suite can be set in the `TESTFLAGS_INTEGRATION` and
`TESTFLAGS_INTEGRATION_CLI` environment variables.

If all you want is to specify a test filter to run, you can set the
`TEST_FILTER` environment variable. This ends up getting passed directly to `go
test -run` (or `go test -check-f`, depending on the test suite). It will also
automatically set the other above-mentioned environment variables accordingly.
### Go Version
You can change the version of Go used to build and test by setting the
`GO_VERSION` variable, for example:
```
make GO_VERSION=1.12.8 test
```

VERSION (new file): `17.04.0-ce`
It consists of various components in this repository:
- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs.
- `daemon/` The daemon, which serves the API.
## Swagger definition
The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
1. Automatically generate documentation.
2. Automatically generate the Go server and client. (A work-in-progress.)
3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.
## Updating the API documentation
The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation.
The file is split into two main sections:
The file is split into two main sections:
To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section.
There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://github.com/docker/docker/issues/27919).
`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing.
## Viewing the API documentation
package api
import (
"encoding/json"
"encoding/pem"
"fmt"
"mime"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/system"
"github.com/docker/libtrust"
)
// Common constants for daemon and client.
const (
// DefaultVersion of Current REST API
DefaultVersion string = "1.28"
// NoBaseImageSpecifier is the symbol used by the FROM
// command to specify that no base image is to be used.
NoBaseImageSpecifier string = "scratch"
)
// byPortInfo is a temporary type used to sort types.Port by its fields
type byPortInfo []types.Port
func (r byPortInfo) Len() int { return len(r) }
func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r byPortInfo) Less(i, j int) bool {
if r[i].PrivatePort != r[j].PrivatePort {
return r[i].PrivatePort < r[j].PrivatePort
}
if r[i].IP != r[j].IP {
return r[i].IP < r[j].IP
}
if r[i].PublicPort != r[j].PublicPort {
return r[i].PublicPort < r[j].PublicPort
}
return r[i].Type < r[j].Type
}
// DisplayablePorts returns formatted string representing open ports of container
// e.g. "0.0.0.0:80->9090/tcp, 9988/tcp"
// it's used by command 'docker ps'
func DisplayablePorts(ports []types.Port) string {
type portGroup struct {
first uint16
last uint16
}
groupMap := make(map[string]*portGroup)
var result []string
var hostMappings []string
var groupMapKeys []string
sort.Sort(byPortInfo(ports))
for _, port := range ports {
current := port.PrivatePort
portKey := port.Type
if port.IP != "" {
if port.PublicPort != current {
hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type))
continue
}
portKey = fmt.Sprintf("%s/%s", port.IP, port.Type)
}
group := groupMap[portKey]
if group == nil {
groupMap[portKey] = &portGroup{first: current, last: current}
// record order that groupMap keys are created
groupMapKeys = append(groupMapKeys, portKey)
continue
}
if current == (group.last + 1) {
group.last = current
continue
}
result = append(result, formGroup(portKey, group.first, group.last))
groupMap[portKey] = &portGroup{first: current, last: current}
}
for _, portKey := range groupMapKeys {
g := groupMap[portKey]
result = append(result, formGroup(portKey, g.first, g.last))
}
result = append(result, hostMappings...)
return strings.Join(result, ", ")
}
func formGroup(key string, start, last uint16) string {
parts := strings.Split(key, "/")
groupType := parts[0]
var ip string
if len(parts) > 1 {
ip = parts[0]
groupType = parts[1]
}
group := strconv.Itoa(int(start))
if start != last {
group = fmt.Sprintf("%s-%d", group, last)
}
if ip != "" {
group = fmt.Sprintf("%s:%s->%s", ip, group, group)
}
return fmt.Sprintf("%s/%s", group, groupType)
}
// MatchesContentType validates the content type against the expected one
func MatchesContentType(contentType, expectedType string) bool {
mimetype, _, err := mime.ParseMediaType(contentType)
if err != nil {
logrus.Errorf("Error parsing media type: %s error: %v", contentType, err)
}
return err == nil && mimetype == expectedType
}
// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
// otherwise generates a new one
func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700)
if err != nil {
return nil, err
}
trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
if err == libtrust.ErrKeyFileDoesNotExist {
trustKey, err = libtrust.GenerateECP256PrivateKey()
if err != nil {
return nil, fmt.Errorf("Error generating key: %s", err)
}
encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath))
if err != nil {
return nil, fmt.Errorf("Error serializing key: %s", err)
}
if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil {
return nil, fmt.Errorf("Error saving key file: %s", err)
}
} else if err != nil {
return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err)
}
return trustKey, nil
}
func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) {
if ext == ".json" || ext == ".jwk" {
encoded, err = json.Marshal(key)
if err != nil {
return nil, fmt.Errorf("unable to encode private key JWK: %s", err)
}
} else {
pemBlock, err := key.PEMBlock()
if err != nil {
return nil, fmt.Errorf("unable to encode private key PEM: %s", err)
}
encoded = pem.EncodeToMemory(pemBlock)
}
return
}

api/common_test.go (new file):
package api
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/docker/docker/api/types"
)
type ports struct {
ports []types.Port
expected string
}
// DisplayablePorts
func TestDisplayablePorts(t *testing.T) {
cases := []ports{
{
[]types.Port{
{
PrivatePort: 9988,
Type: "tcp",
},
},
"9988/tcp"},
{
[]types.Port{
{
PrivatePort: 9988,
Type: "udp",
},
},
"9988/udp",
},
{
[]types.Port{
{
IP: "0.0.0.0",
PrivatePort: 9988,
Type: "tcp",
},
},
"0.0.0.0:0->9988/tcp",
},
{
[]types.Port{
{
PrivatePort: 9988,
PublicPort: 8899,
Type: "tcp",
},
},
"9988/tcp",
},
{
[]types.Port{
{
IP: "4.3.2.1",
PrivatePort: 9988,
PublicPort: 8899,
Type: "tcp",
},
},
"4.3.2.1:8899->9988/tcp",
},
{
[]types.Port{
{
IP: "4.3.2.1",
PrivatePort: 9988,
PublicPort: 9988,
Type: "tcp",
},
},
"4.3.2.1:9988->9988/tcp",
},
{
[]types.Port{
{
PrivatePort: 9988,
Type: "udp",
}, {
PrivatePort: 9988,
Type: "udp",
},
},
"9988/udp, 9988/udp",
},
{
[]types.Port{
{
IP: "1.2.3.4",
PublicPort: 9998,
PrivatePort: 9998,
Type: "udp",
}, {
IP: "1.2.3.4",
PublicPort: 9999,
PrivatePort: 9999,
Type: "udp",
},
},
"1.2.3.4:9998-9999->9998-9999/udp",
},
{
[]types.Port{
{
IP: "1.2.3.4",
PublicPort: 8887,
PrivatePort: 9998,
Type: "udp",
}, {
IP: "1.2.3.4",
PublicPort: 8888,
PrivatePort: 9999,
Type: "udp",
},
},
"1.2.3.4:8887->9998/udp, 1.2.3.4:8888->9999/udp",
},
{
[]types.Port{
{
PrivatePort: 9998,
Type: "udp",
}, {
PrivatePort: 9999,
Type: "udp",
},
},
"9998-9999/udp",
},
{
[]types.Port{
{
IP: "1.2.3.4",
PrivatePort: 6677,
PublicPort: 7766,
Type: "tcp",
}, {
PrivatePort: 9988,
PublicPort: 8899,
Type: "udp",
},
},
"9988/udp, 1.2.3.4:7766->6677/tcp",
},
{
[]types.Port{
{
IP: "1.2.3.4",
PrivatePort: 9988,
PublicPort: 8899,
Type: "udp",
}, {
IP: "1.2.3.4",
PrivatePort: 9988,
PublicPort: 8899,
Type: "tcp",
}, {
IP: "4.3.2.1",
PrivatePort: 2233,
PublicPort: 3322,
Type: "tcp",
},
},
"4.3.2.1:3322->2233/tcp, 1.2.3.4:8899->9988/tcp, 1.2.3.4:8899->9988/udp",
},
{
[]types.Port{
{
PrivatePort: 9988,
PublicPort: 8899,
Type: "udp",
}, {
IP: "1.2.3.4",
PrivatePort: 6677,
PublicPort: 7766,
Type: "tcp",
}, {
IP: "4.3.2.1",
PrivatePort: 2233,
PublicPort: 3322,
Type: "tcp",
},
},
"9988/udp, 4.3.2.1:3322->2233/tcp, 1.2.3.4:7766->6677/tcp",
},
{
[]types.Port{
{
PrivatePort: 80,
Type: "tcp",
}, {
PrivatePort: 1024,
Type: "tcp",
}, {
PrivatePort: 80,
Type: "udp",
}, {
PrivatePort: 1024,
Type: "udp",
}, {
IP: "1.1.1.1",
PublicPort: 80,
PrivatePort: 1024,
Type: "tcp",
}, {
IP: "1.1.1.1",
PublicPort: 80,
PrivatePort: 1024,
Type: "udp",
}, {
IP: "1.1.1.1",
PublicPort: 1024,
PrivatePort: 80,
Type: "tcp",
}, {
IP: "1.1.1.1",
PublicPort: 1024,
PrivatePort: 80,
Type: "udp",
}, {
IP: "2.1.1.1",
PublicPort: 80,
PrivatePort: 1024,
Type: "tcp",
}, {
IP: "2.1.1.1",
PublicPort: 80,
PrivatePort: 1024,
Type: "udp",
}, {
IP: "2.1.1.1",
PublicPort: 1024,
PrivatePort: 80,
Type: "tcp",
}, {
IP: "2.1.1.1",
PublicPort: 1024,
PrivatePort: 80,
Type: "udp",
},
},
"80/tcp, 80/udp, 1024/tcp, 1024/udp, 1.1.1.1:1024->80/tcp, 1.1.1.1:1024->80/udp, 2.1.1.1:1024->80/tcp, 2.1.1.1:1024->80/udp, 1.1.1.1:80->1024/tcp, 1.1.1.1:80->1024/udp, 2.1.1.1:80->1024/tcp, 2.1.1.1:80->1024/udp",
},
}
for _, port := range cases {
actual := DisplayablePorts(port.ports)
if port.expected != actual {
t.Fatalf("Expected %s, got %s.", port.expected, actual)
}
}
}
// MatchesContentType
func TestJsonContentType(t *testing.T) {
if !MatchesContentType("application/json", "application/json") {
t.Fail()
}
if !MatchesContentType("application/json; charset=utf-8", "application/json") {
t.Fail()
}
if MatchesContentType("dockerapplication/json", "application/json") {
t.Fail()
}
}
// LoadOrCreateTrustKey
func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) {
tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpKeyFolderPath)
tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile")
if err != nil {
t.Fatal(err)
}
if _, err := LoadOrCreateTrustKey(tmpKeyFile.Name()); err == nil {
t.Fatal("expected an error, got nothing.")
}
}
func TestLoadOrCreateTrustKeyCreateKey(t *testing.T) {
tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpKeyFolderPath)
// Without the need to create the folder hierarchy
tmpKeyFile := filepath.Join(tmpKeyFolderPath, "keyfile")
if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil {
t.Fatalf("expected a new key file, got : %v and %v", err, key)
}
if _, err := os.Stat(tmpKeyFile); err != nil {
t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err)
}
// With the need to create the folder hierarchy as tmpKeyFie is in a path
// where some folders do not exist.
tmpKeyFile = filepath.Join(tmpKeyFolderPath, "folder/hierarchy/keyfile")
if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil {
t.Fatalf("expected a new key file, got : %v and %v", err, key)
}
if _, err := os.Stat(tmpKeyFile); err != nil {
t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err)
}
// With no path at all
defer os.Remove("keyfile")
if key, err := LoadOrCreateTrustKey("keyfile"); err != nil || key == nil {
t.Fatalf("expected a new key file, got : %v and %v", err, key)
}
if _, err := os.Stat("keyfile"); err != nil {
t.Fatalf("Expected to find a file keyfile, got %v", err)
}
}
func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) {
tmpKeyFile := filepath.Join("fixtures", "keyfile")
if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil {
t.Fatalf("expected a key file, got : %v and %v", err, key)
}
}
// +build !windows

package api

// MinVersion represents Minimum REST API version supported
const MinVersion string = "1.12"
package api
// MinVersion represents Minimum REST API version supported
// Technically the first daemon API version released on Windows is v1.25 in

api/errors/errors.go (new file):
package errors
import "net/http"
// apiError is an error wrapper that also
// holds information about response status codes.
type apiError struct {
error
statusCode int
}
// HTTPErrorStatusCode returns a status code.
func (e apiError) HTTPErrorStatusCode() int {
return e.statusCode
}
// NewErrorWithStatusCode allows you to associate
// a specific HTTP Status Code to an error.
// The server will take that code and set
// it as the response status.
func NewErrorWithStatusCode(err error, code int) error {
return apiError{err, code}
}
// NewBadRequestError creates a new API error
// that has the 400 HTTP status code associated to it.
func NewBadRequestError(err error) error {
return NewErrorWithStatusCode(err, http.StatusBadRequest)
}
// NewRequestForbiddenError creates a new API error
// that has the 403 HTTP status code associated to it.
func NewRequestForbiddenError(err error) error {
return NewErrorWithStatusCode(err, http.StatusForbidden)
}
// NewRequestNotFoundError creates a new API error
// that has the 404 HTTP status code associated to it.
func NewRequestNotFoundError(err error) error {
return NewErrorWithStatusCode(err, http.StatusNotFound)
}
// NewRequestConflictError creates a new API error
// that has the 409 HTTP status code associated to it.
func NewRequestConflictError(err error) error {
return NewErrorWithStatusCode(err, http.StatusConflict)
}
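A backend-side sketch of how these helpers are meant to be used (the `lookupContainer` function here is hypothetical, purely for illustration):

```go
package example

import (
	"fmt"

	apierrors "github.com/docker/docker/api/errors"
)

// lookupContainer is a hypothetical lookup that always fails here.
func lookupContainer(id string) error {
	return fmt.Errorf("no such container: %s", id)
}

// handle returns an error with a 404 attached; the API layer can later
// recover the status via the HTTPErrorStatusCode method shown above.
func handle(id string) error {
	if err := lookupContainer(id); err != nil {
		return apierrors.NewRequestNotFoundError(err)
	}
	return nil
}
```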

api/names.go (new file):
package api
import "regexp"
// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names.
const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters.
var RestrictedNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`)
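A quick sketch of validating a user-supplied name against this pattern (the wrapping function is illustrative):

```go
package example

import (
	"fmt"

	"github.com/docker/docker/api"
)

// validateName rejects names that do not match the restricted-name pattern.
func validateName(name string) error {
	if !api.RestrictedNamePattern.MatchString(name) {
		return fmt.Errorf("invalid name %q, only %s are allowed", name, api.RestrictedNameChars)
	}
	return nil
}
```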

package build // import "github.com/docker/docker/api/server/backend/build"
import (
"context"
"fmt"
"strconv"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/builder"
buildkit "github.com/docker/docker/builder/builder-next"
daemonevents "github.com/docker/docker/daemon/events"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/stringid"
"github.com/pkg/errors"
"google.golang.org/grpc"
)
// ImageComponent provides an interface for working with images
type ImageComponent interface {
SquashImage(from string, to string) (string, error)
TagImageWithReference(image.ID, reference.Named) error
}
// Builder defines interface for running a build
type Builder interface {
Build(context.Context, backend.BuildConfig) (*builder.Result, error)
}
// Backend provides build functionality to the API router
type Backend struct {
builder Builder
imageComponent ImageComponent
buildkit *buildkit.Builder
eventsService *daemonevents.Events
}
// NewBackend creates a new build backend from components
func NewBackend(components ImageComponent, builder Builder, buildkit *buildkit.Builder, es *daemonevents.Events) (*Backend, error) {
return &Backend{imageComponent: components, builder: builder, buildkit: buildkit, eventsService: es}, nil
}
// RegisterGRPC registers buildkit controller to the grpc server.
func (b *Backend) RegisterGRPC(s *grpc.Server) {
if b.buildkit != nil {
b.buildkit.RegisterGRPC(s)
}
}
// Build builds an image from a Source
func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string, error) {
options := config.Options
useBuildKit := options.Version == types.BuilderBuildKit
tagger, err := NewTagger(b.imageComponent, config.ProgressWriter.StdoutFormatter, options.Tags)
if err != nil {
return "", err
}
var build *builder.Result
if useBuildKit {
build, err = b.buildkit.Build(ctx, config)
if err != nil {
return "", err
}
} else {
build, err = b.builder.Build(ctx, config)
if err != nil {
return "", err
}
}
if build == nil {
return "", nil
}
var imageID = build.ImageID
if options.Squash {
if imageID, err = squashBuild(build, b.imageComponent); err != nil {
return "", err
}
if config.ProgressWriter.AuxFormatter != nil {
if err = config.ProgressWriter.AuxFormatter.Emit("moby.image.id", types.BuildResult{ID: imageID}); err != nil {
return "", err
}
}
}
if !useBuildKit {
stdout := config.ProgressWriter.StdoutFormatter
fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID))
}
if imageID != "" {
err = tagger.TagImages(image.ID(imageID))
}
return imageID, err
}
// PruneCache removes all cached build sources
func (b *Backend) PruneCache(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) {
buildCacheSize, cacheIDs, err := b.buildkit.Prune(ctx, opts)
if err != nil {
return nil, errors.Wrap(err, "failed to prune build cache")
}
b.eventsService.Log("prune", events.BuilderEventType, events.Actor{
Attributes: map[string]string{
"reclaimed": strconv.FormatInt(buildCacheSize, 10),
},
})
return &types.BuildCachePruneReport{SpaceReclaimed: uint64(buildCacheSize), CachesDeleted: cacheIDs}, nil
}
// Cancel cancels the build by ID
func (b *Backend) Cancel(ctx context.Context, id string) error {
return b.buildkit.Cancel(ctx, id)
}
func squashBuild(build *builder.Result, imageComponent ImageComponent) (string, error) {
var fromID string
if build.FromImage != nil {
fromID = build.FromImage.ImageID()
}
imageID, err := imageComponent.SquashImage(build.ImageID, fromID)
if err != nil {
return "", errors.Wrap(err, "error squashing image")
}
return imageID, nil
}

package build // import "github.com/docker/docker/api/server/backend/build"
import (
"fmt"
"io"
"github.com/docker/distribution/reference"
"github.com/docker/docker/image"
"github.com/pkg/errors"
)
// Tagger is responsible for tagging an image created by a builder
type Tagger struct {
imageComponent ImageComponent
stdout io.Writer
repoAndTags []reference.Named
}
// NewTagger returns a new Tagger for tagging the images of a build.
// If any of the names are invalid tags an error is returned.
func NewTagger(backend ImageComponent, stdout io.Writer, names []string) (*Tagger, error) {
reposAndTags, err := sanitizeRepoAndTags(names)
if err != nil {
return nil, err
}
return &Tagger{
imageComponent: backend,
stdout: stdout,
repoAndTags: reposAndTags,
}, nil
}
// TagImages creates image tags for the imageID
func (bt *Tagger) TagImages(imageID image.ID) error {
for _, rt := range bt.repoAndTags {
if err := bt.imageComponent.TagImageWithReference(imageID, rt); err != nil {
return err
}
fmt.Fprintf(bt.stdout, "Successfully tagged %s\n", reference.FamiliarString(rt))
}
return nil
}
// sanitizeRepoAndTags parses the raw "t" parameter received from the client
// to a slice of repoAndTag.
// It also validates each repoName and tag.
func sanitizeRepoAndTags(names []string) ([]reference.Named, error) {
var (
repoAndTags []reference.Named
// This map is used for deduplicating the "-t" parameter.
uniqNames = make(map[string]struct{})
)
for _, repo := range names {
if repo == "" {
continue
}
ref, err := reference.ParseNormalizedNamed(repo)
if err != nil {
return nil, err
}
if _, isCanonical := ref.(reference.Canonical); isCanonical {
return nil, errors.New("build tag cannot contain a digest")
}
ref = reference.TagNameOnly(ref)
nameWithTag := ref.String()
if _, exists := uniqNames[nameWithTag]; !exists {
uniqNames[nameWithTag] = struct{}{}
repoAndTags = append(repoAndTags, ref)
}
}
return repoAndTags, nil
}

package server
import (
"net/http"
"github.com/docker/docker/api/server/httpstatus"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
"github.com/gorilla/mux"
"google.golang.org/grpc/status"
)
// makeErrorHandler makes an HTTP handler that decodes a Docker error and
// returns it in the response.
func makeErrorHandler(err error) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
statusCode := httpstatus.FromError(err)
vars := mux.Vars(r)
if apiVersionSupportsJSONErrors(vars["version"]) {
response := &types.ErrorResponse{
Message: err.Error(),
}
_ = httputils.WriteJSON(w, statusCode, response)
} else {
http.Error(w, status.Convert(err).Message(), statusCode)
}
}
}
func apiVersionSupportsJSONErrors(version string) bool {
const firstAPIVersionWithJSONErrors = "1.23"
return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors)
}

package httpstatus // import "github.com/docker/docker/api/server/httpstatus"
import (
"fmt"
"net/http"
containerderrors "github.com/containerd/containerd/errdefs"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/docker/errdefs"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type causer interface {
Cause() error
}
// FromError retrieves status code from error message.
func FromError(err error) int {
if err == nil {
logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling")
return http.StatusInternalServerError
}
var statusCode int
// Stop right there
// Are you sure you should be adding a new error class here? Do one of the existing ones work?
// Note that the below functions are already checking the error causal chain for matches.
switch {
case errdefs.IsNotFound(err):
statusCode = http.StatusNotFound
case errdefs.IsInvalidParameter(err):
statusCode = http.StatusBadRequest
case errdefs.IsConflict(err):
statusCode = http.StatusConflict
case errdefs.IsUnauthorized(err):
statusCode = http.StatusUnauthorized
case errdefs.IsUnavailable(err):
statusCode = http.StatusServiceUnavailable
case errdefs.IsForbidden(err):
statusCode = http.StatusForbidden
case errdefs.IsNotModified(err):
statusCode = http.StatusNotModified
case errdefs.IsNotImplemented(err):
statusCode = http.StatusNotImplemented
case errdefs.IsSystem(err) || errdefs.IsUnknown(err) || errdefs.IsDataLoss(err) || errdefs.IsDeadline(err) || errdefs.IsCancelled(err):
statusCode = http.StatusInternalServerError
default:
statusCode = statusCodeFromGRPCError(err)
if statusCode != http.StatusInternalServerError {
return statusCode
}
statusCode = statusCodeFromContainerdError(err)
if statusCode != http.StatusInternalServerError {
return statusCode
}
statusCode = statusCodeFromDistributionError(err)
if statusCode != http.StatusInternalServerError {
return statusCode
}
if e, ok := err.(causer); ok {
return FromError(e.Cause())
}
logrus.WithFields(logrus.Fields{
"module": "api",
"error_type": fmt.Sprintf("%T", err),
}).Debugf("FIXME: Got an API for which error does not match any expected type!!!: %+v", err)
}
if statusCode == 0 {
statusCode = http.StatusInternalServerError
}
return statusCode
}
// statusCodeFromGRPCError returns status code according to gRPC error
func statusCodeFromGRPCError(err error) int {
switch status.Code(err) {
case codes.InvalidArgument: // code 3
return http.StatusBadRequest
case codes.NotFound: // code 5
return http.StatusNotFound
case codes.AlreadyExists: // code 6
return http.StatusConflict
case codes.PermissionDenied: // code 7
return http.StatusForbidden
case codes.FailedPrecondition: // code 9
return http.StatusBadRequest
case codes.Unauthenticated: // code 16
return http.StatusUnauthorized
case codes.OutOfRange: // code 11
return http.StatusBadRequest
case codes.Unimplemented: // code 12
return http.StatusNotImplemented
case codes.Unavailable: // code 14
return http.StatusServiceUnavailable
default:
// codes.Canceled(1)
// codes.Unknown(2)
// codes.DeadlineExceeded(4)
// codes.ResourceExhausted(8)
// codes.Aborted(10)
// codes.Internal(13)
// codes.DataLoss(15)
return http.StatusInternalServerError
}
}
// statusCodeFromDistributionError returns status code according to registry errcode
// code is loosely based on errcode.ServeJSON() in docker/distribution
func statusCodeFromDistributionError(err error) int {
switch errs := err.(type) {
case errcode.Errors:
if len(errs) < 1 {
return http.StatusInternalServerError
}
if _, ok := errs[0].(errcode.ErrorCoder); ok {
return statusCodeFromDistributionError(errs[0])
}
case errcode.ErrorCoder:
return errs.ErrorCode().Descriptor().HTTPStatusCode
}
return http.StatusInternalServerError
}
// statusCodeFromContainerdError returns status code for containerd errors when
// consumed directly (not through gRPC)
func statusCodeFromContainerdError(err error) int {
switch {
case containerderrors.IsInvalidArgument(err):
return http.StatusBadRequest
case containerderrors.IsNotFound(err):
return http.StatusNotFound
case containerderrors.IsAlreadyExists(err):
return http.StatusConflict
case containerderrors.IsFailedPrecondition(err):
return http.StatusPreconditionFailed
case containerderrors.IsUnavailable(err):
return http.StatusServiceUnavailable
case containerderrors.IsNotImplemented(err):
return http.StatusNotImplemented
default:
return http.StatusInternalServerError
}
}

package httputils
import (
"io"

package httputils
import (
"net/http"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
"github.com/gorilla/mux"
"google.golang.org/grpc"
)
// httpStatusError is an interface
// that errors with custom status codes
// implement to tell the api layer
// which response status to set.
type httpStatusError interface {
HTTPErrorStatusCode() int
}
// inputValidationError is an interface
// that errors generated by invalid
// inputs can implement to tell the
// api layer to set a 400 status code
// in the response.
type inputValidationError interface {
IsValidationError() bool
}
// GetHTTPErrorStatusCode retrieves status code from error message.
func GetHTTPErrorStatusCode(err error) int {
if err == nil {
logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling")
return http.StatusInternalServerError
}
var statusCode int
errMsg := err.Error()
switch e := err.(type) {
case httpStatusError:
statusCode = e.HTTPErrorStatusCode()
case inputValidationError:
statusCode = http.StatusBadRequest
default:
// FIXME: this is brittle and should not be necessary, but we still need to identify if
// there are errors falling back into this logic.
// If we need to differentiate between different possible error types,
// we should create appropriate error types that implement the httpStatusError interface.
errStr := strings.ToLower(errMsg)
for _, status := range []struct {
keyword string
code int
}{
{"not found", http.StatusNotFound},
{"cannot find", http.StatusNotFound},
{"no such", http.StatusNotFound},
{"bad parameter", http.StatusBadRequest},
{"no command", http.StatusBadRequest},
{"conflict", http.StatusConflict},
{"impossible", http.StatusNotAcceptable},
{"wrong login/password", http.StatusUnauthorized},
{"unauthorized", http.StatusUnauthorized},
{"hasn't been activated", http.StatusForbidden},
{"this node", http.StatusServiceUnavailable},
{"needs to be unlocked", http.StatusServiceUnavailable},
{"certificates have expired", http.StatusServiceUnavailable},
} {
if strings.Contains(errStr, status.keyword) {
statusCode = status.code
break
}
}
}
if statusCode == 0 {
statusCode = http.StatusInternalServerError
}
return statusCode
}
func apiVersionSupportsJSONErrors(version string) bool {
const firstAPIVersionWithJSONErrors = "1.23"
return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors)
}
// MakeErrorHandler makes an HTTP handler that decodes a Docker error and
// returns it in the response.
func MakeErrorHandler(err error) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
statusCode := GetHTTPErrorStatusCode(err)
vars := mux.Vars(r)
if apiVersionSupportsJSONErrors(vars["version"]) {
response := &types.ErrorResponse{
Message: err.Error(),
}
WriteJSON(w, statusCode, response)
} else {
http.Error(w, grpc.ErrorDesc(err), statusCode)
}
}
}

package httputils
import (
"errors"
"net/http"
"path/filepath"
"strconv"
"strings"
)
type ArchiveOptions struct {
Name string
Path string
}
// ArchiveFormValues parses form values and turns them into ArchiveOptions.
// It fails if the archive name and path are not in the request.
func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions, error) {
@@ -65,12 +57,14 @@ func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions,
}
name := vars["name"]
if name == "" {
return ArchiveOptions{}, badParameterError{"name"}
}
path := r.Form.Get("path")
if path == "" {
return ArchiveOptions{}, badParameterError{"path"}
path := filepath.FromSlash(r.Form.Get("path"))
switch {
case name == "":
return ArchiveOptions{}, errors.New("bad parameter: 'name' cannot be empty")
case path == "":
return ArchiveOptions{}, errors.New("bad parameter: 'path' cannot be empty")
}
return ArchiveOptions{name, path}, nil
}
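
To illustrate the request shape ArchiveFormValues expects, a hypothetical snippet (the container name "web" and path are invented; "name" normally comes from the gorilla/mux route vars):

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/docker/api/server/httputils"
)

func main() {
	// "name" is a route variable; "path" is an ordinary query parameter.
	r, _ := http.NewRequest("GET", "/containers/web/archive?path=/etc/nginx", nil)
	opts, err := httputils.ArchiveFormValues(r, map[string]string{"name": "web"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("name=%s path=%s\n", opts.Name, opts.Path)
}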

View File

@@ -1,4 +1,4 @@
package httputils // import "github.com/docker/docker/api/server/httputils"
package httputils
import (
"net/http"
@@ -23,7 +23,7 @@ func TestBoolValue(t *testing.T) {
for c, e := range cases {
v := url.Values{}
v.Set("test", c)
r, _ := http.NewRequest(http.MethodPost, "", nil)
r, _ := http.NewRequest("POST", "", nil)
r.Form = v
a := BoolValue(r, "test")
@@ -34,14 +34,14 @@ func TestBoolValue(t *testing.T) {
}
func TestBoolValueOrDefault(t *testing.T) {
r, _ := http.NewRequest(http.MethodGet, "", nil)
r, _ := http.NewRequest("GET", "", nil)
if !BoolValueOrDefault(r, "queryparam", true) {
t.Fatal("Expected to get true default value, got false")
}
v := url.Values{}
v.Set("param", "")
r, _ = http.NewRequest(http.MethodGet, "", nil)
r, _ = http.NewRequest("GET", "", nil)
r.Form = v
if BoolValueOrDefault(r, "param", true) {
t.Fatal("Expected not to get true")
@@ -59,7 +59,7 @@ func TestInt64ValueOrZero(t *testing.T) {
for c, e := range cases {
v := url.Values{}
v.Set("test", c)
r, _ := http.NewRequest(http.MethodPost, "", nil)
r, _ := http.NewRequest("POST", "", nil)
r.Form = v
a := Int64ValueOrZero(r, "test")
@@ -79,7 +79,7 @@ func TestInt64ValueOrDefault(t *testing.T) {
for c, e := range cases {
v := url.Values{}
v.Set("test", c)
r, _ := http.NewRequest(http.MethodPost, "", nil)
r, _ := http.NewRequest("POST", "", nil)
r.Form = v
a, err := Int64ValueOrDefault(r, "test", -1)
@@ -95,7 +95,7 @@ func TestInt64ValueOrDefault(t *testing.T) {
func TestInt64ValueOrDefaultWithError(t *testing.T) {
v := url.Values{}
v.Set("test", "invalid")
r, _ := http.NewRequest(http.MethodPost, "", nil)
r, _ := http.NewRequest("POST", "", nil)
r.Form = v
_, err := Int64ValueOrDefault(r, "test", -1)
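
The helpers under test are not shown in this hunk; as a point of reference, BoolValue treats an empty string and the usual falsy spellings as false, roughly like this standalone sketch (boolValue is a local stand-in, not the diffed source):

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// boolValue mirrors the semantics the tests above exercise: empty and the
// usual falsy spellings are false, everything else is true.
func boolValue(r *http.Request, k string) bool {
	s := strings.ToLower(strings.TrimSpace(r.FormValue(k)))
	return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none")
}

func main() {
	r, _ := http.NewRequest("POST", "", nil)
	r.Form = url.Values{"test": []string{"TRUE"}}
	fmt.Println(boolValue(r, "test")) // true
}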

View File

@@ -1,19 +1,18 @@
package httputils // import "github.com/docker/docker/api/server/httputils"
package httputils
import (
"context"
"encoding/json"
"fmt"
"io"
"mime"
"net/http"
"strings"
"github.com/docker/docker/errdefs"
"github.com/pkg/errors"
"golang.org/x/net/context"
"github.com/docker/docker/api"
)
// APIVersionKey is the client's requested API version.
type APIVersionKey struct{}
const APIVersionKey = "api-version"
// APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints.
// Any function that has the appropriate signature can be registered as an API endpoint (e.g. getVersion).
@@ -27,7 +26,7 @@ func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) {
return nil, nil, err
}
// Flush the options to make sure the client sets the raw mode
_, _ = conn.Write([]byte{})
conn.Write([]byte{})
return conn, conn, nil
}
@@ -37,9 +36,9 @@ func CloseStreams(streams ...interface{}) {
if tcpc, ok := stream.(interface {
CloseWrite() error
}); ok {
_ = tcpc.CloseWrite()
tcpc.CloseWrite()
} else if closer, ok := stream.(io.Closer); ok {
_ = closer.Close()
closer.Close()
}
}
}
@@ -49,50 +48,17 @@ func CheckForJSON(r *http.Request) error {
ct := r.Header.Get("Content-Type")
// No Content-Type header is ok as long as there's no Body
if ct == "" && (r.Body == nil || r.ContentLength == 0) {
return nil
if ct == "" {
if r.Body == nil || r.ContentLength == 0 {
return nil
}
}
// Otherwise it had better be json
return matchesContentType(ct, "application/json")
}
// ReadJSON validates the request to have the correct content-type, and decodes
// the request's Body into out.
func ReadJSON(r *http.Request, out interface{}) error {
err := CheckForJSON(r)
if err != nil {
return err
}
if r.Body == nil || r.ContentLength == 0 {
// an empty body is not invalid, so don't return an error; see
// https://lists.w3.org/Archives/Public/ietf-http-wg/2010JulSep/0272.html
if api.MatchesContentType(ct, "application/json") {
return nil
}
dec := json.NewDecoder(r.Body)
err = dec.Decode(out)
defer r.Body.Close()
if err != nil {
if err == io.EOF {
return errdefs.InvalidParameter(errors.New("invalid JSON: got EOF while reading request body"))
}
return errdefs.InvalidParameter(errors.Wrap(err, "invalid JSON"))
}
if dec.More() {
return errdefs.InvalidParameter(errors.New("unexpected content after JSON"))
}
return nil
}
// WriteJSON writes the value v to the http response stream as json with standard json encoding.
func WriteJSON(w http.ResponseWriter, code int, v interface{}) error {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
enc := json.NewEncoder(w)
enc.SetEscapeHTML(false)
return enc.Encode(v)
return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct)
}
// ParseForm ensures the request form is parsed even with invalid content types.
@@ -102,7 +68,7 @@ func ParseForm(r *http.Request) error {
return nil
}
if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") {
return errdefs.InvalidParameter(err)
return err
}
return nil
}
@@ -114,21 +80,9 @@ func VersionFromContext(ctx context.Context) string {
return ""
}
if val := ctx.Value(APIVersionKey{}); val != nil {
if val := ctx.Value(APIVersionKey); val != nil {
return val.(string)
}
return ""
}
// matchesContentType validates the content type against the expected one
func matchesContentType(contentType, expectedType string) error {
mimetype, _, err := mime.ParseMediaType(contentType)
if err != nil {
return errdefs.InvalidParameter(errors.Wrapf(err, "malformed Content-Type header (%s)", contentType))
}
if mimetype != expectedType {
return errdefs.InvalidParameter(errors.Errorf("unsupported Content-Type header (%s): must be '%s'", contentType, expectedType))
}
return nil
}
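
ReadJSON and WriteJSON exist only on the newer side of this diff; a hypothetical handler using them (endpoint and field names invented) follows the decode-then-encode shape used throughout the API server:

package main

import (
	"net/http"

	"github.com/docker/docker/api/server/httputils"
)

type renameRequest struct {
	Name string
}

func handler(w http.ResponseWriter, r *http.Request) {
	var req renameRequest
	// ReadJSON enforces an application/json Content-Type, tolerates an
	// empty body, and rejects trailing garbage after the JSON document.
	if err := httputils.ReadJSON(r, &req); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	_ = httputils.WriteJSON(w, http.StatusOK, map[string]string{"renamed": req.Name})
}

func main() {
	http.HandleFunc("/rename", handler)
	_ = http.ListenAndServe(":8080", nil)
}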

View File

@@ -1,130 +0,0 @@
package httputils // import "github.com/docker/docker/api/server/httputils"
import (
"net/http"
"strings"
"testing"
)
// matchesContentType
func TestJsonContentType(t *testing.T) {
err := matchesContentType("application/json", "application/json")
if err != nil {
t.Error(err)
}
err = matchesContentType("application/json; charset=utf-8", "application/json")
if err != nil {
t.Error(err)
}
expected := "unsupported Content-Type header (dockerapplication/json): must be 'application/json'"
err = matchesContentType("dockerapplication/json", "application/json")
if err == nil || err.Error() != expected {
t.Errorf(`expected "%s", got "%v"`, expected, err)
}
expected = "malformed Content-Type header (foo;;;bar): mime: invalid media parameter"
err = matchesContentType("foo;;;bar", "application/json")
if err == nil || err.Error() != expected {
t.Errorf(`expected "%s", got "%v"`, expected, err)
}
}
func TestReadJSON(t *testing.T) {
t.Run("nil body", func(t *testing.T) {
req, err := http.NewRequest("POST", "https://example.com/some/path", nil)
if err != nil {
t.Error(err)
}
foo := struct{}{}
err = ReadJSON(req, &foo)
if err != nil {
t.Error(err)
}
})
t.Run("empty body", func(t *testing.T) {
req, err := http.NewRequest("POST", "https://example.com/some/path", strings.NewReader(""))
if err != nil {
t.Error(err)
}
foo := struct{ SomeField string }{}
err = ReadJSON(req, &foo)
if err != nil {
t.Error(err)
}
if foo.SomeField != "" {
t.Errorf("expected: '', got: %s", foo.SomeField)
}
})
t.Run("with valid request", func(t *testing.T) {
req, err := http.NewRequest("POST", "https://example.com/some/path", strings.NewReader(`{"SomeField":"some value"}`))
if err != nil {
t.Error(err)
}
req.Header.Set("Content-Type", "application/json")
foo := struct{ SomeField string }{}
err = ReadJSON(req, &foo)
if err != nil {
t.Error(err)
}
if foo.SomeField != "some value" {
t.Errorf("expected: 'some value', got: %s", foo.SomeField)
}
})
t.Run("with whitespace", func(t *testing.T) {
req, err := http.NewRequest("POST", "https://example.com/some/path", strings.NewReader(`
{"SomeField":"some value"}
`))
if err != nil {
t.Error(err)
}
req.Header.Set("Content-Type", "application/json")
foo := struct{ SomeField string }{}
err = ReadJSON(req, &foo)
if err != nil {
t.Error(err)
}
if foo.SomeField != "some value" {
t.Errorf("expected: 'some value', got: %s", foo.SomeField)
}
})
t.Run("with extra content", func(t *testing.T) {
req, err := http.NewRequest("POST", "https://example.com/some/path", strings.NewReader(`{"SomeField":"some value"} and more content`))
if err != nil {
t.Error(err)
}
req.Header.Set("Content-Type", "application/json")
foo := struct{ SomeField string }{}
err = ReadJSON(req, &foo)
if err == nil {
t.Error("expected an error, got none")
}
expected := "unexpected content after JSON"
if err.Error() != expected {
t.Errorf("expected: '%s', got: %s", expected, err.Error())
}
})
t.Run("invalid JSON", func(t *testing.T) {
req, err := http.NewRequest("POST", "https://example.com/some/path", strings.NewReader(`{invalid json`))
if err != nil {
t.Error(err)
}
req.Header.Set("Content-Type", "application/json")
foo := struct{ SomeField string }{}
err = ReadJSON(req, &foo)
if err == nil {
t.Error("expected an error, got none")
}
expected := "invalid JSON: invalid character 'i' looking for beginning of object key string"
if err.Error() != expected {
t.Errorf("expected: '%s', got: %s", expected, err.Error())
}
})
}

View File

@@ -0,0 +1,17 @@
// +build go1.7
package httputils
import (
"encoding/json"
"net/http"
)
// WriteJSON writes the value v to the http response stream as json with standard json encoding.
func WriteJSON(w http.ResponseWriter, code int, v interface{}) error {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
enc := json.NewEncoder(w)
enc.SetEscapeHTML(false)
return enc.Encode(v)
}

View File

@@ -0,0 +1,16 @@
// +build go1.6,!go1.7
package httputils
import (
"encoding/json"
"net/http"
)
// WriteJSON writes the value v to the http response stream as json with standard json encoding.
func WriteJSON(w http.ResponseWriter, code int, v interface{}) error {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
enc := json.NewEncoder(w)
return enc.Encode(v)
}
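
The two build-tagged variants exist because Encoder.SetEscapeHTML only appeared in Go 1.7; the behavioral difference is easy to demonstrate on a Go 1.7+ toolchain:

package main

import (
	"encoding/json"
	"os"
)

func main() {
	v := map[string]string{"cmd": "ls > /tmp/out && echo <done>"}

	enc := json.NewEncoder(os.Stdout)
	enc.Encode(v) // default: {"cmd":"ls \u003e /tmp/out \u0026\u0026 echo \u003cdone\u003e"}

	enc.SetEscapeHTML(false) // Go 1.7+, hence the build split above
	enc.Encode(v) // {"cmd":"ls > /tmp/out && echo <done>"}
}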

View File

@@ -1,84 +0,0 @@
package httputils // import "github.com/docker/docker/api/server/httputils"
import (
"context"
"fmt"
"io"
"net/url"
"sort"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/stdcopy"
)
// WriteLogStream writes an encoded byte stream of log messages from the
// messages channel, multiplexing them with a stdcopy.Writer if mux is true
func WriteLogStream(_ context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *types.ContainerLogsOptions, mux bool) {
wf := ioutils.NewWriteFlusher(w)
defer wf.Close()
wf.Flush()
outStream := io.Writer(wf)
errStream := outStream
sysErrStream := errStream
if mux {
sysErrStream = stdcopy.NewStdWriter(outStream, stdcopy.Systemerr)
errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
}
for {
msg, ok := <-msgs
if !ok {
return
}
// check if the message contains an error. if so, write that error
// and move on to the next message
if msg.Err != nil {
fmt.Fprintf(sysErrStream, "Error grabbing logs: %v\n", msg.Err)
continue
}
logLine := msg.Line
if config.Details {
logLine = append(attrsByteSlice(msg.Attrs), ' ')
logLine = append(logLine, msg.Line...)
}
if config.Timestamps {
logLine = append([]byte(msg.Timestamp.Format(jsonmessage.RFC3339NanoFixed)+" "), logLine...)
}
if msg.Source == "stdout" && config.ShowStdout {
_, _ = outStream.Write(logLine)
}
if msg.Source == "stderr" && config.ShowStderr {
_, _ = errStream.Write(logLine)
}
}
}
type byKey []backend.LogAttr
func (b byKey) Len() int { return len(b) }
func (b byKey) Less(i, j int) bool { return b[i].Key < b[j].Key }
func (b byKey) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func attrsByteSlice(a []backend.LogAttr) []byte {
// Note this sorts "a" in-place. That is fine here - nothing else is
// going to use Attrs or care about the order.
sort.Sort(byKey(a))
var ret []byte
for i, pair := range a {
k, v := url.QueryEscape(pair.Key), url.QueryEscape(pair.Value)
ret = append(ret, []byte(k)...)
ret = append(ret, '=')
ret = append(ret, []byte(v)...)
if i != len(a)-1 {
ret = append(ret, ',')
}
}
return ret
}
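
The attribute serialization at the end of this file is easy to verify in isolation; a self-contained restatement (using a local logAttr type and sort.Slice in place of backend.LogAttr and the byKey helper):

package main

import (
	"fmt"
	"net/url"
	"sort"
)

type logAttr struct{ Key, Value string }

// Same shape as attrsByteSlice above: sort by key, query-escape both
// halves, join pairs with commas.
func attrsByteSlice(a []logAttr) []byte {
	sort.Slice(a, func(i, j int) bool { return a[i].Key < a[j].Key })
	var ret []byte
	for i, pair := range a {
		ret = append(ret, url.QueryEscape(pair.Key)...)
		ret = append(ret, '=')
		ret = append(ret, url.QueryEscape(pair.Value)...)
		if i != len(a)-1 {
			ret = append(ret, ',')
		}
	}
	return ret
}

func main() {
	attrs := []logAttr{{"env", "prod eu"}, {"app", "web"}}
	fmt.Println(string(attrsByteSlice(attrs))) // app=web,env=prod+eu
}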

View File

@@ -1,9 +1,9 @@
package server // import "github.com/docker/docker/api/server"
package server
import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/server/middleware"
"github.com/sirupsen/logrus"
)
// handlerWithGlobalMiddlewares wraps the handler function for a request with
@@ -16,7 +16,7 @@ func (s *Server) handlerWithGlobalMiddlewares(handler httputils.APIFunc) httputi
next = m.WrapHandler(next)
}
if logrus.GetLevel() == logrus.DebugLevel {
if s.cfg.Logging && logrus.GetLevel() == logrus.DebugLevel {
next = middleware.DebugRequestMiddleware(next)
}
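
The wrapping loop composes middlewares functionally; a tiny standalone model (names invented) shows the resulting nesting order, where later entries in the list wrap earlier ones, so the last middleware added runs outermost:

package main

import "fmt"

type handler func() string

// wrap mirrors the loop in handlerWithGlobalMiddlewares: each middleware
// wraps the chain built so far.
func wrap(h handler, names ...string) handler {
	next := h
	for _, n := range names {
		n, inner := n, next
		next = func() string { return n + "(" + inner() + ")" }
	}
	return next
}

func main() {
	h := wrap(func() string { return "handler" }, "version", "cors", "debug")
	fmt.Println(h()) // debug(cors(version(handler)))
}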

View File

@@ -1,10 +1,10 @@
package middleware // import "github.com/docker/docker/api/server/middleware"
package middleware
import (
"context"
"net/http"
"github.com/sirupsen/logrus"
"github.com/Sirupsen/logrus"
"golang.org/x/net/context"
)
// CORSMiddleware injects CORS headers to each request

View File

@@ -1,16 +1,16 @@
package middleware // import "github.com/docker/docker/api/server/middleware"
package middleware
import (
"bufio"
"context"
"encoding/json"
"io"
"net/http"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/pkg/ioutils"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
)
// DebugRequestMiddleware dumps the request to logger
@@ -18,7 +18,7 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
logrus.Debugf("Calling %s %s", r.Method, r.RequestURI)
if r.Method != http.MethodPost {
if r.Method != "POST" {
return handler(ctx, w, r, vars)
}
if err := httputils.CheckForJSON(r); err != nil {
@@ -61,24 +61,10 @@ func maskSecretKeys(inp interface{}) {
}
return
}
if form, ok := inp.(map[string]interface{}); ok {
scrub := []string{
// Note: The Data field contains the base64-encoded secret in 'secret'
// and 'config' create and update requests. Currently, no other POST
// API endpoints use a data field, so we scrub this field unconditionally.
// Change this handling to be conditional if a new endpoint is added
// in future where this field should not be scrubbed.
"data",
"jointoken",
"password",
"secret",
"signingcakey",
"unlockkey",
}
loop0:
for k, v := range form {
for _, m := range scrub {
for _, m := range []string{"password", "secret", "jointoken", "unlockkey"} {
if strings.EqualFold(m, k) {
form[k] = "*****"
continue loop0
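
The labeled loop above (cut off by the hunk) implements a recursive scrub; a self-contained sketch of the same idea, with maskKeys standing in for maskSecretKeys:

package main

import (
	"fmt"
	"strings"
)

// maskKeys restates the scrub loop: blank any field whose name matches a
// sensitive key (case-insensitively), and recurse into nested objects.
func maskKeys(form map[string]interface{}, secrets []string) {
	for k, v := range form {
		masked := false
		for _, s := range secrets {
			if strings.EqualFold(s, k) {
				form[k] = "*****"
				masked = true
				break
			}
		}
		if masked {
			continue
		}
		if nested, ok := v.(map[string]interface{}); ok {
			maskKeys(nested, secrets)
		}
	}
}

func main() {
	body := map[string]interface{}{
		"Name":     "db",
		"Password": "hunter2",
		"Spec":     map[string]interface{}{"JoinToken": "SWMTKN-1-..."},
	}
	maskKeys(body, []string{"password", "jointoken"})
	fmt.Println(body) // Password and JoinToken are now *****
}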

View File

@@ -1,75 +0,0 @@
package middleware // import "github.com/docker/docker/api/server/middleware"
import (
"testing"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)
func TestMaskSecretKeys(t *testing.T) {
tests := []struct {
doc string
input map[string]interface{}
expected map[string]interface{}
}{
{
doc: "secret/config create and update requests",
input: map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}},
expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}},
},
{
doc: "masking other fields (recursively)",
input: map[string]interface{}{
"password": "pass",
"secret": "secret",
"jointoken": "jointoken",
"unlockkey": "unlockkey",
"signingcakey": "signingcakey",
"other": map[string]interface{}{
"password": "pass",
"secret": "secret",
"jointoken": "jointoken",
"unlockkey": "unlockkey",
"signingcakey": "signingcakey",
},
},
expected: map[string]interface{}{
"password": "*****",
"secret": "*****",
"jointoken": "*****",
"unlockkey": "*****",
"signingcakey": "*****",
"other": map[string]interface{}{
"password": "*****",
"secret": "*****",
"jointoken": "*****",
"unlockkey": "*****",
"signingcakey": "*****",
},
},
},
{
doc: "case insensitive field matching",
input: map[string]interface{}{
"PASSWORD": "pass",
"other": map[string]interface{}{
"PASSWORD": "pass",
},
},
expected: map[string]interface{}{
"PASSWORD": "*****",
"other": map[string]interface{}{
"PASSWORD": "*****",
},
},
},
}
for _, testcase := range tests {
t.Run(testcase.doc, func(t *testing.T) {
maskSecretKeys(testcase.input)
assert.Check(t, is.DeepEqual(testcase.expected, testcase.input))
})
}
}

View File

@@ -1,8 +1,9 @@
package middleware // import "github.com/docker/docker/api/server/middleware"
package middleware
import (
"context"
"net/http"
"golang.org/x/net/context"
)
// ExperimentalMiddleware is the middleware in charge of adding the

View File

@@ -1,8 +1,9 @@
package middleware // import "github.com/docker/docker/api/server/middleware"
package middleware
import (
"context"
"net/http"
"golang.org/x/net/context"
)
// Middleware is an interface to allow the use of ordinary functions as Docker API filters.

View File

@@ -1,13 +1,13 @@
package middleware // import "github.com/docker/docker/api/server/middleware"
package middleware
import (
"context"
"fmt"
"net/http"
"runtime"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/errors"
"github.com/docker/docker/api/types/versions"
"golang.org/x/net/context"
)
// VersionMiddleware is a middleware that
@@ -28,37 +28,24 @@ func NewVersionMiddleware(s, d, m string) VersionMiddleware {
}
}
type versionUnsupportedError struct {
version, minVersion, maxVersion string
}
func (e versionUnsupportedError) Error() string {
if e.minVersion != "" {
return fmt.Sprintf("client version %s is too old. Minimum supported API version is %s, please upgrade your client to a newer version", e.version, e.minVersion)
}
return fmt.Sprintf("client version %s is too new. Maximum supported API version is %s", e.version, e.maxVersion)
}
func (e versionUnsupportedError) InvalidParameter() {}
// WrapHandler returns a new handler function wrapping the previous one in the request chain.
func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
w.Header().Set("Server", fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS))
w.Header().Set("API-Version", v.defaultVersion)
w.Header().Set("OSType", runtime.GOOS)
apiVersion := vars["version"]
if apiVersion == "" {
apiVersion = v.defaultVersion
}
if versions.LessThan(apiVersion, v.minVersion) {
return versionUnsupportedError{version: apiVersion, minVersion: v.minVersion}
return errors.NewBadRequestError(fmt.Errorf("client version %s is too old. Minimum supported API version is %s, please upgrade your client to a newer version", apiVersion, v.minVersion))
}
if versions.GreaterThan(apiVersion, v.defaultVersion) {
return versionUnsupportedError{version: apiVersion, maxVersion: v.defaultVersion}
}
ctx = context.WithValue(ctx, httputils.APIVersionKey{}, apiVersion)
header := fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS)
w.Header().Set("Server", header)
w.Header().Set("API-Version", v.defaultVersion)
w.Header().Set("OSType", runtime.GOOS)
ctx = context.WithValue(ctx, "api-version", apiVersion)
return handler(ctx, w, r, vars)
}
}
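
Both sides of this diff implement the same negotiation; the decision table can be reproduced with the versions helpers directly (the 1.12/1.27 bounds here are made up for the illustration):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	const minVersion, defaultVersion = "1.12", "1.27"

	for _, req := range []string{"", "0.9", "1.18", "1.99"} {
		v := req
		if v == "" {
			v = defaultVersion // no version segment in the URL: use the default
		}
		switch {
		case versions.LessThan(v, minVersion):
			fmt.Printf("%q => rejected: client too old\n", req)
		case versions.GreaterThan(v, defaultVersion):
			fmt.Printf("%q => rejected: client too new\n", req)
		default:
			fmt.Printf("%q => served as API %s\n", req, v)
		}
	}
}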

View File

@@ -1,73 +1,41 @@
package middleware // import "github.com/docker/docker/api/server/middleware"
package middleware
import (
"context"
"net/http"
"net/http/httptest"
"runtime"
"strings"
"testing"
"github.com/docker/docker/api/server/httputils"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
"golang.org/x/net/context"
)
func TestVersionMiddlewareVersion(t *testing.T) {
defaultVersion := "1.10.0"
minVersion := "1.2.0"
expectedVersion := defaultVersion
func TestVersionMiddleware(t *testing.T) {
handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
v := httputils.VersionFromContext(ctx)
assert.Check(t, is.Equal(expectedVersion, v))
if httputils.VersionFromContext(ctx) == "" {
t.Fatal("Expected version, got empty string")
}
return nil
}
defaultVersion := "1.10.0"
minVersion := "1.2.0"
m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion)
h := m.WrapHandler(handler)
req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
req, _ := http.NewRequest("GET", "/containers/json", nil)
resp := httptest.NewRecorder()
ctx := context.Background()
tests := []struct {
reqVersion string
expectedVersion string
errString string
}{
{
expectedVersion: "1.10.0",
},
{
reqVersion: "1.9.0",
expectedVersion: "1.9.0",
},
{
reqVersion: "0.1",
errString: "client version 0.1 is too old. Minimum supported API version is 1.2.0, please upgrade your client to a newer version",
},
{
reqVersion: "9999.9999",
errString: "client version 9999.9999 is too new. Maximum supported API version is 1.10.0",
},
}
for _, test := range tests {
expectedVersion = test.expectedVersion
err := h(ctx, resp, req, map[string]string{"version": test.reqVersion})
if test.errString != "" {
assert.Check(t, is.Error(err, test.errString))
} else {
assert.Check(t, err)
}
if err := h(ctx, resp, req, map[string]string{}); err != nil {
t.Fatal(err)
}
}
func TestVersionMiddlewareWithErrorsReturnsHeaders(t *testing.T) {
func TestVersionMiddlewareWithErrors(t *testing.T) {
handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
v := httputils.VersionFromContext(ctx)
assert.Check(t, len(v) != 0)
if httputils.VersionFromContext(ctx) == "" {
t.Fatal("Expected version, got empty string")
}
return nil
}
@@ -76,17 +44,14 @@ func TestVersionMiddlewareWithErrorsReturnsHeaders(t *testing.T) {
m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion)
h := m.WrapHandler(handler)
req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
req, _ := http.NewRequest("GET", "/containers/json", nil)
resp := httptest.NewRecorder()
ctx := context.Background()
vars := map[string]string{"version": "0.1"}
err := h(ctx, resp, req, vars)
assert.Check(t, is.ErrorContains(err, ""))
hdr := resp.Result().Header
assert.Check(t, is.Contains(hdr.Get("Server"), "Docker/"+defaultVersion))
assert.Check(t, is.Contains(hdr.Get("Server"), runtime.GOOS))
assert.Check(t, is.Equal(hdr.Get("API-Version"), defaultVersion))
assert.Check(t, is.Equal(hdr.Get("OSType"), runtime.GOOS))
if !strings.Contains(err.Error(), "client version 0.1 is too old. Minimum supported API version is 1.2.0") {
t.Fatalf("Expected too old client error, got %v", err)
}
}

api/server/profiler.go Normal file
View File

@@ -0,0 +1,41 @@
package server
import (
"expvar"
"fmt"
"net/http"
"net/http/pprof"
"github.com/gorilla/mux"
)
const debugPathPrefix = "/debug/"
func profilerSetup(mainRouter *mux.Router) {
var r = mainRouter.PathPrefix(debugPathPrefix).Subrouter()
r.HandleFunc("/vars", expVars)
r.HandleFunc("/pprof/", pprof.Index)
r.HandleFunc("/pprof/cmdline", pprof.Cmdline)
r.HandleFunc("/pprof/profile", pprof.Profile)
r.HandleFunc("/pprof/symbol", pprof.Symbol)
r.HandleFunc("/pprof/trace", pprof.Trace)
r.HandleFunc("/pprof/block", pprof.Handler("block").ServeHTTP)
r.HandleFunc("/pprof/heap", pprof.Handler("heap").ServeHTTP)
r.HandleFunc("/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP)
r.HandleFunc("/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP)
}
// Replicated from expvar.go as not public.
func expVars(w http.ResponseWriter, r *http.Request) {
first := true
w.Header().Set("Content-Type", "application/json; charset=utf-8")
fmt.Fprintln(w, "{")
expvar.Do(func(kv expvar.KeyValue) {
if !first {
fmt.Fprintln(w, ",")
}
first = false
fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
})
fmt.Fprintln(w, "\n}")
}
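
A quick way to exercise these routes in isolation (a test-style probe using httptest, not part of the daemon code):

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"net/http/pprof"

	"github.com/gorilla/mux"
)

func main() {
	// Mount a pprof route the same way profilerSetup does and hit it
	// through an httptest server.
	r := mux.NewRouter()
	sub := r.PathPrefix("/debug/").Subrouter()
	sub.HandleFunc("/pprof/cmdline", pprof.Cmdline)

	srv := httptest.NewServer(r)
	defer srv.Close()

	resp, err := http.Get(srv.URL + "/debug/pprof/cmdline")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("%d bytes, status %s\n", len(body), resp.Status)
}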

View File

@@ -1,24 +1,20 @@
package build // import "github.com/docker/docker/api/server/router/build"
package build
import (
"context"
"io"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"golang.org/x/net/context"
)
// Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID.
type Backend interface {
// Build a Docker image returning the id of the image
// BuildFromContext builds a Docker image referenced by an imageID string.
//
// Note: Tagging an image should not be done by a Builder, it should instead be done
// by the caller.
//
// TODO: make this return a reference instead of string
Build(context.Context, backend.BuildConfig) (string, error)
// Prune build cache
PruneCache(context.Context, types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error)
Cancel(context.Context, string) error
}
type experimentalProvider interface {
HasExperimental() bool
BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error)
}

View File

@@ -1,26 +1,17 @@
package build // import "github.com/docker/docker/api/server/router/build"
package build
import (
"runtime"
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/api/types"
)
import "github.com/docker/docker/api/server/router"
// buildRouter is a router to talk with the build controller
type buildRouter struct {
backend Backend
daemon experimentalProvider
routes []router.Route
features *map[string]bool
backend Backend
routes []router.Route
}
// NewRouter initializes a new build router
func NewRouter(b Backend, d experimentalProvider, features *map[string]bool) router.Router {
func NewRouter(b Backend) router.Router {
r := &buildRouter{
backend: b,
daemon: d,
features: features,
backend: b,
}
r.initRoutes()
return r
@@ -33,30 +24,6 @@ func (r *buildRouter) Routes() []router.Route {
func (r *buildRouter) initRoutes() {
r.routes = []router.Route{
router.NewPostRoute("/build", r.postBuild),
router.NewPostRoute("/build/prune", r.postPrune),
router.NewPostRoute("/build/cancel", r.postCancel),
router.Cancellable(router.NewPostRoute("/build", r.postBuild)),
}
}
// BuilderVersion derives the default docker builder version from the config.
//
// The default on Linux is version "2" (BuildKit), but the daemon can be
// configured to recommend version "1" (classic Builder). Windows does not
// yet support BuildKit for native Windows images, and uses "1" (classic builder)
// as a default.
//
// This value is only a recommendation as advertised by the daemon, and it is
// up to the client to choose which builder to use.
func BuilderVersion(features map[string]bool) types.BuilderVersion {
// TODO(thaJeztah) move the default to daemon/config
if runtime.GOOS == "windows" {
return types.BuilderV1
}
bv := types.BuilderBuildKit
if v, ok := features["buildkit"]; ok && !v {
bv = types.BuilderV1
}
return bv
}
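
Ignoring the Windows branch, the default logic of BuilderVersion reduces to a small table; a standalone restatement (builderVersion is a local stand-in returning the version string):

package main

import "fmt"

// Linux default: BuildKit ("2") unless the buildkit feature flag is
// explicitly set to false.
func builderVersion(features map[string]bool) string {
	bv := "2" // BuildKit
	if v, ok := features["buildkit"]; ok && !v {
		bv = "1" // classic builder
	}
	return bv
}

func main() {
	fmt.Println(builderVersion(nil))                                // 2
	fmt.Println(builderVersion(map[string]bool{"buildkit": true}))  // 2
	fmt.Println(builderVersion(map[string]bool{"buildkit": false})) // 1
}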

View File

@@ -1,9 +1,7 @@
package build // import "github.com/docker/docker/api/server/router/build"
package build
import (
"bufio"
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
@@ -14,60 +12,22 @@ import (
"strings"
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
units "github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/docker/go-units"
"golang.org/x/net/context"
)
type invalidIsolationError string
func (e invalidIsolationError) Error() string {
return fmt.Sprintf("Unsupported isolation: %q", string(e))
}
func (e invalidIsolationError) InvalidParameter() {}
func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) {
options := &types.ImageBuildOptions{
Version: types.BuilderV1, // Builder V1 is the default, but can be overridden
Dockerfile: r.FormValue("dockerfile"),
SuppressOutput: httputils.BoolValue(r, "q"),
NoCache: httputils.BoolValue(r, "nocache"),
ForceRemove: httputils.BoolValue(r, "forcerm"),
MemorySwap: httputils.Int64ValueOrZero(r, "memswap"),
Memory: httputils.Int64ValueOrZero(r, "memory"),
CPUShares: httputils.Int64ValueOrZero(r, "cpushares"),
CPUPeriod: httputils.Int64ValueOrZero(r, "cpuperiod"),
CPUQuota: httputils.Int64ValueOrZero(r, "cpuquota"),
CPUSetCPUs: r.FormValue("cpusetcpus"),
CPUSetMems: r.FormValue("cpusetmems"),
CgroupParent: r.FormValue("cgroupparent"),
NetworkMode: r.FormValue("networkmode"),
Tags: r.Form["t"],
ExtraHosts: r.Form["extrahosts"],
SecurityOpt: r.Form["securityopt"],
Squash: httputils.BoolValue(r, "squash"),
Target: r.FormValue("target"),
RemoteContext: r.FormValue("remote"),
SessionID: r.FormValue("session"),
BuildID: r.FormValue("buildid"),
}
if runtime.GOOS != "windows" && options.SecurityOpt != nil {
return nil, errdefs.InvalidParameter(errors.New("The daemon on this platform does not support setting security options on build"))
}
version := httputils.VersionFromContext(ctx)
options := &types.ImageBuildOptions{}
if httputils.BoolValue(r, "forcerm") && versions.GreaterThanOrEqualTo(version, "1.12") {
options.Remove = true
} else if r.FormValue("rm") == "" && versions.GreaterThanOrEqualTo(version, "1.12") {
@@ -78,43 +38,56 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
if httputils.BoolValue(r, "pull") && versions.GreaterThanOrEqualTo(version, "1.16") {
options.PullParent = true
}
if versions.GreaterThanOrEqualTo(version, "1.32") {
options.Platform = r.FormValue("platform")
}
if versions.GreaterThanOrEqualTo(version, "1.40") {
outputsJSON := r.FormValue("outputs")
if outputsJSON != "" {
var outputs []types.ImageBuildOutput
if err := json.Unmarshal([]byte(outputsJSON), &outputs); err != nil {
return nil, err
}
options.Outputs = outputs
}
}
if s := r.Form.Get("shmsize"); s != "" {
shmSize, err := strconv.ParseInt(s, 10, 64)
options.Dockerfile = r.FormValue("dockerfile")
options.SuppressOutput = httputils.BoolValue(r, "q")
options.NoCache = httputils.BoolValue(r, "nocache")
options.ForceRemove = httputils.BoolValue(r, "forcerm")
options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap")
options.Memory = httputils.Int64ValueOrZero(r, "memory")
options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares")
options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod")
options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota")
options.CPUSetCPUs = r.FormValue("cpusetcpus")
options.CPUSetMems = r.FormValue("cpusetmems")
options.CgroupParent = r.FormValue("cgroupparent")
options.NetworkMode = r.FormValue("networkmode")
options.Tags = r.Form["t"]
options.ExtraHosts = r.Form["extrahosts"]
options.SecurityOpt = r.Form["securityopt"]
options.Squash = httputils.BoolValue(r, "squash")
if r.Form.Get("shmsize") != "" {
shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64)
if err != nil {
return nil, err
}
options.ShmSize = shmSize
}
if i := r.FormValue("isolation"); i != "" {
options.Isolation = container.Isolation(i)
if !options.Isolation.IsValid() {
return nil, invalidIsolationError(options.Isolation)
if i := container.Isolation(r.FormValue("isolation")); i != "" {
if !container.Isolation.IsValid(i) {
return nil, fmt.Errorf("Unsupported isolation: %q", i)
}
options.Isolation = i
}
if ulimitsJSON := r.FormValue("ulimits"); ulimitsJSON != "" {
var buildUlimits = []*units.Ulimit{}
if runtime.GOOS != "windows" && options.SecurityOpt != nil {
return nil, fmt.Errorf("the daemon on this platform does not support --security-opt to build")
}
var buildUlimits = []*units.Ulimit{}
ulimitsJSON := r.FormValue("ulimits")
if ulimitsJSON != "" {
if err := json.Unmarshal([]byte(ulimitsJSON), &buildUlimits); err != nil {
return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading ulimit settings")
return nil, err
}
options.Ulimits = buildUlimits
}
var buildArgs = map[string]*string{}
buildArgsJSON := r.FormValue("buildargs")
// Note that there are two ways a --build-arg might appear in the
// json of the query param:
// "foo":"bar"
@@ -127,189 +100,34 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
// the fact they mentioned it, we need to pass that along to the builder
// so that it can print a warning about "foo" being unused if there is
// no "ARG foo" in the Dockerfile.
if buildArgsJSON := r.FormValue("buildargs"); buildArgsJSON != "" {
var buildArgs = map[string]*string{}
if buildArgsJSON != "" {
if err := json.Unmarshal([]byte(buildArgsJSON), &buildArgs); err != nil {
return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading build args")
return nil, err
}
options.BuildArgs = buildArgs
}
if labelsJSON := r.FormValue("labels"); labelsJSON != "" {
var labels = map[string]string{}
var labels = map[string]string{}
labelsJSON := r.FormValue("labels")
if labelsJSON != "" {
if err := json.Unmarshal([]byte(labelsJSON), &labels); err != nil {
return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading labels")
return nil, err
}
options.Labels = labels
}
if cacheFromJSON := r.FormValue("cachefrom"); cacheFromJSON != "" {
var cacheFrom = []string{}
var cacheFrom = []string{}
cacheFromJSON := r.FormValue("cachefrom")
if cacheFromJSON != "" {
if err := json.Unmarshal([]byte(cacheFromJSON), &cacheFrom); err != nil {
return nil, err
}
options.CacheFrom = cacheFrom
}
if bv := r.FormValue("version"); bv != "" {
v, err := parseVersion(bv)
if err != nil {
return nil, err
}
options.Version = v
}
return options, nil
}
func parseVersion(s string) (types.BuilderVersion, error) {
switch types.BuilderVersion(s) {
case types.BuilderV1:
return types.BuilderV1, nil
case types.BuilderBuildKit:
return types.BuilderBuildKit, nil
default:
return "", errors.Errorf("invalid version %q", s)
}
}
func (br *buildRouter) postPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
fltrs, err := filters.FromJSON(r.Form.Get("filters"))
if err != nil {
return errors.Wrap(err, "could not parse filters")
}
ksfv := r.FormValue("keep-storage")
if ksfv == "" {
ksfv = "0"
}
ks, err := strconv.Atoi(ksfv)
if err != nil {
return errors.Wrapf(err, "keep-storage is in bytes and expects an integer, got %v", ksfv)
}
opts := types.BuildCachePruneOptions{
All: httputils.BoolValue(r, "all"),
Filters: fltrs,
KeepStorage: int64(ks),
}
report, err := br.backend.PruneCache(ctx, opts)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, report)
}
func (br *buildRouter) postCancel(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
w.Header().Set("Content-Type", "application/json")
id := r.FormValue("id")
if id == "" {
return errors.Errorf("build ID not provided")
}
return br.backend.Cancel(ctx, id)
}
func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var (
notVerboseBuffer = bytes.NewBuffer(nil)
version = httputils.VersionFromContext(ctx)
)
w.Header().Set("Content-Type", "application/json")
body := r.Body
var ww io.Writer = w
if body != nil {
// there is a possibility that output is written before request body
// has been fully read so we need to protect against it.
// this can be removed when
// https://github.com/golang/go/issues/15527
// https://github.com/golang/go/issues/22209
// has been fixed
body, ww = wrapOutputBufferedUntilRequestRead(body, ww)
}
output := ioutils.NewWriteFlusher(ww)
defer func() { _ = output.Close() }()
errf := func(err error) error {
if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 {
_, _ = output.Write(notVerboseBuffer.Bytes())
}
// Do not write the error in the http output if it's still empty.
// This prevents writing a 200 (OK) when there is an internal error.
if !output.Flushed() {
return err
}
_, err = output.Write(streamformatter.FormatError(err))
if err != nil {
logrus.Warnf("could not write error response: %v", err)
}
return nil
}
buildOptions, err := newImageBuildOptions(ctx, r)
if err != nil {
return errf(err)
}
buildOptions.AuthConfigs = getAuthConfigs(r.Header)
if buildOptions.Squash && !br.daemon.HasExperimental() {
return errdefs.InvalidParameter(errors.New("squash is only supported with experimental mode"))
}
out := io.Writer(output)
if buildOptions.SuppressOutput {
out = notVerboseBuffer
}
// Currently, only used if context is from a remote url.
// Look at code in DetectContextFromRemoteURL for more information.
createProgressReader := func(in io.ReadCloser) io.ReadCloser {
progressOutput := streamformatter.NewJSONProgressOutput(out, true)
return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", buildOptions.RemoteContext)
}
wantAux := versions.GreaterThanOrEqualTo(version, "1.30")
imgID, err := br.backend.Build(ctx, backend.BuildConfig{
Source: body,
Options: buildOptions,
ProgressWriter: buildProgressWriter(out, wantAux, createProgressReader),
})
if err != nil {
return errf(err)
}
// Everything worked so if -q was provided the output from the daemon
// should be just the image ID and we'll print that to stdout.
if buildOptions.SuppressOutput {
_, _ = fmt.Fprintln(streamformatter.NewStdoutWriter(output), imgID)
}
return nil
}
func getAuthConfigs(header http.Header) map[string]types.AuthConfig {
authConfigs := map[string]types.AuthConfig{}
authConfigsEncoded := header.Get("X-Registry-Config")
if authConfigsEncoded == "" {
return authConfigs
}
authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded))
// Pulling an image does not error when no auth is provided, so to remain
// consistent with the existing API, decode errors are ignored
_ = json.NewDecoder(authConfigsJSON).Decode(&authConfigs)
return authConfigs
}
type syncWriter struct {
w io.Writer
mu sync.Mutex
@@ -322,118 +140,87 @@ func (s *syncWriter) Write(b []byte) (count int, err error) {
return
}
func buildProgressWriter(out io.Writer, wantAux bool, createProgressReader func(io.ReadCloser) io.ReadCloser) backend.ProgressWriter {
out = &syncWriter{w: out}
func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var (
authConfigs = map[string]types.AuthConfig{}
authConfigsEncoded = r.Header.Get("X-Registry-Config")
notVerboseBuffer = bytes.NewBuffer(nil)
)
var aux *streamformatter.AuxFormatter
if wantAux {
aux = &streamformatter.AuxFormatter{Writer: out}
if authConfigsEncoded != "" {
authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded))
if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil {
// for a pull it is not an error if no auth was given; to increase
// compatibility with the existing API, the auth config defaults
// to empty.
}
}
return backend.ProgressWriter{
w.Header().Set("Content-Type", "application/json")
output := ioutils.NewWriteFlusher(w)
defer output.Close()
sf := streamformatter.NewJSONStreamFormatter()
errf := func(err error) error {
if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 {
output.Write(notVerboseBuffer.Bytes())
}
// Do not write the error in the http output if it's still empty.
// This prevents writing a 200 (OK) when there is an internal error.
if !output.Flushed() {
return err
}
_, err = w.Write(sf.FormatError(err))
if err != nil {
logrus.Warnf("could not write error response: %v", err)
}
return nil
}
buildOptions, err := newImageBuildOptions(ctx, r)
if err != nil {
return errf(err)
}
buildOptions.AuthConfigs = authConfigs
remoteURL := r.FormValue("remote")
// Currently, only used if context is from a remote url.
// Look at code in DetectContextFromRemoteURL for more information.
createProgressReader := func(in io.ReadCloser) io.ReadCloser {
progressOutput := sf.NewProgressOutput(output, true)
if buildOptions.SuppressOutput {
progressOutput = sf.NewProgressOutput(notVerboseBuffer, true)
}
return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", remoteURL)
}
out := io.Writer(output)
if buildOptions.SuppressOutput {
out = notVerboseBuffer
}
out = &syncWriter{w: out}
stdout := &streamformatter.StdoutFormatter{Writer: out, StreamFormatter: sf}
stderr := &streamformatter.StderrFormatter{Writer: out, StreamFormatter: sf}
pg := backend.ProgressWriter{
Output: out,
StdoutFormatter: streamformatter.NewStdoutWriter(out),
StderrFormatter: streamformatter.NewStderrWriter(out),
AuxFormatter: aux,
StdoutFormatter: stdout,
StderrFormatter: stderr,
ProgressReaderFunc: createProgressReader,
}
}
type flusher interface {
Flush()
}
func wrapOutputBufferedUntilRequestRead(rc io.ReadCloser, out io.Writer) (io.ReadCloser, io.Writer) {
var fl flusher = &ioutils.NopFlusher{}
if f, ok := out.(flusher); ok {
fl = f
}
w := &wcf{
buf: bytes.NewBuffer(nil),
Writer: out,
flusher: fl,
}
r := bufio.NewReader(rc)
_, err := r.Peek(1)
imgID, err := br.backend.BuildFromContext(ctx, r.Body, remoteURL, buildOptions, pg)
if err != nil {
return rc, out
return errf(err)
}
rc = &rcNotifier{
Reader: r,
Closer: rc,
notify: w.notify,
// Everything worked so if -q was provided the output from the daemon
// should be just the image ID and we'll print that to stdout.
if buildOptions.SuppressOutput {
stdout := &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf}
fmt.Fprintf(stdout, "%s\n", string(imgID))
}
return rc, w
}
type rcNotifier struct {
io.Reader
io.Closer
notify func()
}
func (r *rcNotifier) Read(b []byte) (int, error) {
n, err := r.Reader.Read(b)
if err != nil {
r.notify()
}
return n, err
}
func (r *rcNotifier) Close() error {
r.notify()
return r.Closer.Close()
}
type wcf struct {
io.Writer
flusher
mu sync.Mutex
ready bool
buf *bytes.Buffer
flushed bool
}
func (w *wcf) Flush() {
w.mu.Lock()
w.flushed = true
if !w.ready {
w.mu.Unlock()
return
}
w.mu.Unlock()
w.flusher.Flush()
}
func (w *wcf) Flushed() bool {
w.mu.Lock()
b := w.flushed
w.mu.Unlock()
return b
}
func (w *wcf) Write(b []byte) (int, error) {
w.mu.Lock()
if !w.ready {
n, err := w.buf.Write(b)
w.mu.Unlock()
return n, err
}
w.mu.Unlock()
return w.Writer.Write(b)
}
func (w *wcf) notify() {
w.mu.Lock()
if !w.ready {
if w.buf.Len() > 0 {
_, _ = io.Copy(w.Writer, w.buf)
}
if w.flushed {
w.flusher.Flush()
}
w.ready = true
}
w.mu.Unlock()
return nil
}
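
The wcf writer interleaved above buffers build output until the request body has been fully read, working around the golang issues cited in postBuild. A single-goroutine sketch of the mechanism, with the mutex and flusher handling omitted for brevity:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// bufferedWriter models wcf: writes are buffered until notify() flips it
// to pass-through, at which point the backlog is copied out first.
type bufferedWriter struct {
	dst   io.Writer
	buf   bytes.Buffer
	ready bool
}

func (w *bufferedWriter) Write(p []byte) (int, error) {
	if !w.ready {
		return w.buf.Write(p)
	}
	return w.dst.Write(p)
}

func (w *bufferedWriter) notify() {
	if !w.ready {
		io.Copy(w.dst, &w.buf)
		w.ready = true
	}
}

func main() {
	var out strings.Builder
	w := &bufferedWriter{dst: &out}

	fmt.Fprint(w, "progress while the body is still being read...\n") // buffered
	w.notify() // request body fully read: release the backlog
	fmt.Fprint(w, "streamed directly\n")

	fmt.Print(out.String())
}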

View File

@@ -1,4 +1,4 @@
package checkpoint // import "github.com/docker/docker/api/server/router/checkpoint"
package checkpoint
import "github.com/docker/docker/api/types"

View File

@@ -1,4 +1,4 @@
package checkpoint // import "github.com/docker/docker/api/server/router/checkpoint"
package checkpoint
import (
"github.com/docker/docker/api/server/httputils"
@@ -29,8 +29,8 @@ func (r *checkpointRouter) Routes() []router.Route {
func (r *checkpointRouter) initRoutes() {
r.routes = []router.Route{
router.NewGetRoute("/containers/{name:.*}/checkpoints", r.getContainerCheckpoints, router.Experimental),
router.NewPostRoute("/containers/{name:.*}/checkpoints", r.postContainerCheckpoint, router.Experimental),
router.NewDeleteRoute("/containers/{name}/checkpoints/{checkpoint}", r.deleteContainerCheckpoint, router.Experimental),
router.Experimental(router.NewGetRoute("/containers/{name:.*}/checkpoints", r.getContainerCheckpoints)),
router.Experimental(router.NewPostRoute("/containers/{name:.*}/checkpoints", r.postContainerCheckpoint)),
router.Experimental(router.NewDeleteRoute("/containers/{name}/checkpoints/{checkpoint}", r.deleteContainerCheckpoint)),
}
}

View File

@@ -1,11 +1,12 @@
package checkpoint // import "github.com/docker/docker/api/server/router/checkpoint"
package checkpoint
import (
"context"
"encoding/json"
"net/http"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"golang.org/x/net/context"
)
func (s *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -14,7 +15,9 @@ func (s *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.R
}
var options types.CheckpointCreateOptions
if err := httputils.ReadJSON(r, &options); err != nil {
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&options); err != nil {
return err
}

View File

@@ -1,14 +1,15 @@
package container // import "github.com/docker/docker/api/server/router/container"
package container
import (
"context"
"io"
"time"
"golang.org/x/net/context"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
containerpkg "github.com/docker/docker/container"
"github.com/docker/docker/pkg/archive"
)
@@ -17,7 +18,7 @@ type execBackend interface {
ContainerExecCreate(name string, config *types.ExecConfig) (string, error)
ContainerExecInspect(id string) (*backend.ExecInspect, error)
ContainerExecResize(name string, height, width int) error
ContainerExecStart(ctx context.Context, name string, options container.ExecStartOptions) error
ContainerExecStart(ctx context.Context, name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error
ExecExists(name string) (bool, error)
}
@@ -26,31 +27,31 @@ type copyBackend interface {
ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error)
ContainerCopy(name string, res string) (io.ReadCloser, error)
ContainerExport(name string, out io.Writer) error
ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error
ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error
ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error)
}
// stateBackend includes functions to implement to provide container state lifecycle functionality.
type stateBackend interface {
ContainerCreate(config types.ContainerCreateConfig) (container.CreateResponse, error)
ContainerKill(name string, signal string) error
ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
ContainerKill(name string, sig uint64) error
ContainerPause(name string) error
ContainerRename(oldName, newName string) error
ContainerResize(name string, height, width int) error
ContainerRestart(ctx context.Context, name string, options container.StopOptions) error
ContainerRestart(name string, seconds *int) error
ContainerRm(name string, config *types.ContainerRmConfig) error
ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
ContainerStop(ctx context.Context, name string, options container.StopOptions) error
ContainerStop(name string, seconds *int) error
ContainerUnpause(name string) error
ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error)
ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error)
ContainerWait(name string, timeout time.Duration) (int, error)
}
// monitorBackend includes functions to implement to provide containers monitoring functionality.
type monitorBackend interface {
ContainerChanges(name string) ([]archive.Change, error)
ContainerInspect(name string, size bool, version string) (interface{}, error)
ContainerLogs(ctx context.Context, name string, config *types.ContainerLogsOptions) (msgs <-chan *backend.LogMessage, tty bool, err error)
ContainerLogs(ctx context.Context, name string, config *backend.ContainerLogsConfig, started chan struct{}) error
ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error
ContainerTop(name string, psArgs string) (*container.ContainerTopOKBody, error)
@@ -64,16 +65,11 @@ type attachBackend interface {
// systemBackend includes functions to implement to provide system wide containers functionality
type systemBackend interface {
ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*types.ContainersPruneReport, error)
}
type commitBackend interface {
CreateImageFromContainer(name string, config *backend.CreateImageConfig) (imageID string, err error)
ContainersPrune(pruneFilters filters.Args) (*types.ContainersPruneReport, error)
}
// Backend is all the methods that need to be implemented to provide container specific functionality.
type Backend interface {
commitBackend
execBackend
copyBackend
stateBackend

View File

@@ -1,24 +1,30 @@
package container // import "github.com/docker/docker/api/server/router/container"
package container
import (
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/server/router"
)
type validationError struct {
error
}
func (validationError) IsValidationError() bool {
return true
}
// containerRouter is a router to talk with the container controller
type containerRouter struct {
backend Backend
decoder httputils.ContainerDecoder
routes []router.Route
cgroup2 bool
}
// NewRouter initializes a new container router
func NewRouter(b Backend, decoder httputils.ContainerDecoder, cgroup2 bool) router.Router {
func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router {
r := &containerRouter{
backend: b,
decoder: decoder,
cgroup2: cgroup2,
}
r.initRoutes()
return r
@@ -40,8 +46,8 @@ func (r *containerRouter) initRoutes() {
router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges),
router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName),
router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop),
router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs),
router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats),
router.Cancellable(router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs)),
router.Cancellable(router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats)),
router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach),
router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID),
router.NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive),
@@ -56,14 +62,13 @@ func (r *containerRouter) initRoutes() {
router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait),
router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize),
router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach),
router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8 (API v1.20), errors out since 1.12 (API v1.24)
router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8, Errors out since 1.12
router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate),
router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart),
router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize),
router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename),
router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate),
router.NewPostRoute("/containers/prune", r.postContainersPrune),
router.NewPostRoute("/commit", r.postCommit),
// PUT
router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive),
// DELETE
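
The validationError type at the top of this file exists to satisfy the inputValidationError interface checked by GetHTTPErrorStatusCode earlier in this compare, so wrapped errors surface as HTTP 400. A sketch of the connection, assuming both live in the httputils package as on the older side:

package main

import (
	"errors"
	"fmt"

	"github.com/docker/docker/api/server/httputils"
)

type validationError struct{ error }

func (validationError) IsValidationError() bool { return true }

func main() {
	err := validationError{errors.New("starting container with non-empty request body")}
	fmt.Println(httputils.GetHTTPErrorStatusCode(err)) // 400
}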

View File

@@ -1,76 +1,33 @@
package container // import "github.com/docker/docker/api/server/router/container"
package container
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"runtime"
"strconv"
"syscall"
"time"
"github.com/containerd/containerd/platforms"
"github.com/docker/docker/api/server/httpstatus"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/versions"
containerpkg "github.com/docker/docker/container"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/ioutils"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/stdcopy"
"golang.org/x/net/context"
"golang.org/x/net/websocket"
)
func (s *containerRouter) postCommit(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
if err := httputils.CheckForJSON(r); err != nil {
return err
}
// TODO: remove pause arg, and always pause in backend
pause := httputils.BoolValue(r, "pause")
version := httputils.VersionFromContext(ctx)
if r.FormValue("pause") == "" && versions.GreaterThanOrEqualTo(version, "1.13") {
pause = true
}
config, _, _, err := s.decoder.DecodeConfig(r.Body)
if err != nil && err != io.EOF { // Do not fail if body is empty.
return err
}
commitCfg := &backend.CreateImageConfig{
Pause: pause,
Repo: r.Form.Get("repo"),
Tag: r.Form.Get("tag"),
Author: r.Form.Get("author"),
Comment: r.Form.Get("comment"),
Config: config,
Changes: r.Form["changes"],
}
imgID, err := s.backend.CreateImageFromContainer(r.Form.Get("container"), commitCfg)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusCreated, &types.IDResponse{ID: imgID})
}
func (s *containerRouter) getContainersJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
filter, err := filters.FromJSON(r.Form.Get("filters"))
filter, err := filters.FromParam(r.Form.Get("filters"))
if err != nil {
return err
}
@@ -108,16 +65,11 @@ func (s *containerRouter) getContainersStats(ctx context.Context, w http.Respons
if !stream {
w.Header().Set("Content-Type", "application/json")
}
var oneShot bool
if versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.41") {
oneShot = httputils.BoolValueOrDefault(r, "one-shot", false)
}
config := &backend.ContainerStatsConfig{
Stream: stream,
OneShot: oneShot,
OutStream: w,
Version: httputils.VersionFromContext(ctx),
Version: string(httputils.VersionFromContext(ctx)),
}
return s.backend.ContainerStats(ctx, vars["name"], config)
@@ -135,37 +87,37 @@ func (s *containerRouter) getContainersLogs(ctx context.Context, w http.Response
// with the appropriate status code.
stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr")
if !(stdout || stderr) {
return errdefs.InvalidParameter(errors.New("Bad parameters: you must choose at least one stream"))
return fmt.Errorf("Bad parameters: you must choose at least one stream")
}
containerName := vars["name"]
logsConfig := &types.ContainerLogsOptions{
Follow: httputils.BoolValue(r, "follow"),
Timestamps: httputils.BoolValue(r, "timestamps"),
Since: r.Form.Get("since"),
Until: r.Form.Get("until"),
Tail: r.Form.Get("tail"),
ShowStdout: stdout,
ShowStderr: stderr,
Details: httputils.BoolValue(r, "details"),
logsConfig := &backend.ContainerLogsConfig{
ContainerLogsOptions: types.ContainerLogsOptions{
Follow: httputils.BoolValue(r, "follow"),
Timestamps: httputils.BoolValue(r, "timestamps"),
Since: r.Form.Get("since"),
Tail: r.Form.Get("tail"),
ShowStdout: stdout,
ShowStderr: stderr,
Details: httputils.BoolValue(r, "details"),
},
OutStream: w,
}
msgs, tty, err := s.backend.ContainerLogs(ctx, containerName, logsConfig)
if err != nil {
return err
chStarted := make(chan struct{})
if err := s.backend.ContainerLogs(ctx, containerName, logsConfig, chStarted); err != nil {
select {
case <-chStarted:
// The client may be expecting all of the data we're sending to
// be multiplexed, so mux it through the Systemerr stream, which
// will cause the client to throw an error when demuxing
stdwriter := stdcopy.NewStdWriter(logsConfig.OutStream, stdcopy.Systemerr)
fmt.Fprintf(stdwriter, "Error running logs job: %v\n", err)
default:
return err
}
}
contentType := types.MediaTypeRawStream
if !tty && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
contentType = types.MediaTypeMultiplexedStream
}
w.Header().Set("Content-Type", contentType)
// if it has a tty, we're not muxing streams; if it doesn't, we are. simple.
// this is the point of no return for writing a response. once we call
// WriteLogStream, the response has been started and errors will be
// returned in band by WriteLogStream
httputils.WriteLogStream(ctx, w, msgs, logsConfig, !tty)
return nil
}
@@ -173,14 +125,6 @@ func (s *containerRouter) getContainersExport(ctx context.Context, w http.Respon
return s.backend.ContainerExport(vars["name"], w)
}
type bodyOnStartError struct{}
func (bodyOnStartError) Error() string {
return "starting container with non-empty request body was deprecated since API v1.22 and removed in v1.24"
}
func (bodyOnStartError) InvalidParameter() {}
func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
// If contentLength is -1, we can assume chunked encoding
// or more technically that the length is unknown
@@ -194,7 +138,7 @@ func (s *containerRouter) postContainersStart(ctx context.Context, w http.Respon
// A non-nil json object is at least 7 characters.
if r.ContentLength > 7 || r.ContentLength == -1 {
if versions.GreaterThanOrEqualTo(version, "1.24") {
return bodyOnStartError{}
return validationError{fmt.Errorf("starting container with non-empty request body was deprecated since v1.10 and removed in v1.12")}
}
if err := httputils.CheckForJSON(r); err != nil {
@@ -227,39 +171,47 @@ func (s *containerRouter) postContainersStop(ctx context.Context, w http.Respons
return err
}
var (
options container.StopOptions
version = httputils.VersionFromContext(ctx)
)
if versions.GreaterThanOrEqualTo(version, "1.42") {
options.Signal = r.Form.Get("signal")
}
var seconds *int
if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" {
valSeconds, err := strconv.Atoi(tmpSeconds)
if err != nil {
return err
}
options.Timeout = &valSeconds
seconds = &valSeconds
}
if err := s.backend.ContainerStop(ctx, vars["name"], options); err != nil {
if err := s.backend.ContainerStop(vars["name"], seconds); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
type errContainerIsRunning interface {
ContainerIsRunning() bool
}
func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
var sig syscall.Signal
name := vars["name"]
if err := s.backend.ContainerKill(name, r.Form.Get("signal")); err != nil {
// If we have a signal, look at it. Otherwise, do nothing
if sigStr := r.Form.Get("signal"); sigStr != "" {
var err error
if sig, err = signal.ParseSignal(sigStr); err != nil {
return err
}
}
if err := s.backend.ContainerKill(name, uint64(sig)); err != nil {
var isStopped bool
if errdefs.IsConflict(err) {
isStopped = true
if e, ok := err.(errContainerIsRunning); ok {
isStopped = !e.ContainerIsRunning()
}
// Return the error unless it was caused by the container already being stopped.
@@ -267,7 +219,7 @@ func (s *containerRouter) postContainersKill(ctx context.Context, w http.Respons
// to keep backwards compatibility.
version := httputils.VersionFromContext(ctx)
if versions.GreaterThanOrEqualTo(version, "1.20") || !isStopped {
return errors.Wrapf(err, "Cannot kill container: %s", name)
return fmt.Errorf("Cannot kill container %s: %v", name, err)
}
}
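signal.ParseSignal above accepts either a numeric signal or a case-insensitive, optionally SIG-prefixed name. A stdlib-only sketch with a deliberately tiny table; the real pkg/signal table covers the full per-platform signal set.

package main

import (
    "fmt"
    "strconv"
    "strings"
    "syscall"
)

var sigTable = map[string]syscall.Signal{
    "HUP": syscall.SIGHUP, "INT": syscall.SIGINT,
    "TERM": syscall.SIGTERM, "KILL": syscall.SIGKILL,
}

func parseSignal(raw string) (syscall.Signal, error) {
    // Numeric form, e.g. "9".
    if n, err := strconv.Atoi(raw); err == nil {
        return syscall.Signal(n), nil
    }
    // Named form, e.g. "KILL" or "sigterm".
    name := strings.TrimPrefix(strings.ToUpper(raw), "SIG")
    if sig, ok := sigTable[name]; ok {
        return sig, nil
    }
    return 0, fmt.Errorf("invalid signal: %s", raw)
}

func main() {
    for _, s := range []string{"KILL", "sigterm", "9", "bogus"} {
        sig, err := parseSignal(s)
        fmt.Println(s, "->", sig, err)
    }
}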
@@ -280,26 +232,21 @@ func (s *containerRouter) postContainersRestart(ctx context.Context, w http.Resp
return err
}
var (
options container.StopOptions
version = httputils.VersionFromContext(ctx)
)
if versions.GreaterThanOrEqualTo(version, "1.42") {
options.Signal = r.Form.Get("signal")
}
var seconds *int
if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" {
valSeconds, err := strconv.Atoi(tmpSeconds)
if err != nil {
return err
}
options.Timeout = &valSeconds
seconds = &valSeconds
}
if err := s.backend.ContainerRestart(ctx, vars["name"], options); err != nil {
if err := s.backend.ContainerRestart(vars["name"], seconds); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
@@ -332,67 +279,13 @@ func (s *containerRouter) postContainersUnpause(ctx context.Context, w http.Resp
}
func (s *containerRouter) postContainersWait(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
// Behavior changed in version 1.30 to handle wait condition and to
// return headers immediately.
version := httputils.VersionFromContext(ctx)
legacyBehaviorPre130 := versions.LessThan(version, "1.30")
legacyRemovalWaitPre134 := false
// The wait condition defaults to "not-running".
waitCondition := containerpkg.WaitConditionNotRunning
if !legacyBehaviorPre130 {
if err := httputils.ParseForm(r); err != nil {
return err
}
if v := r.Form.Get("condition"); v != "" {
switch container.WaitCondition(v) {
case container.WaitConditionNotRunning:
waitCondition = containerpkg.WaitConditionNotRunning
case container.WaitConditionNextExit:
waitCondition = containerpkg.WaitConditionNextExit
case container.WaitConditionRemoved:
waitCondition = containerpkg.WaitConditionRemoved
legacyRemovalWaitPre134 = versions.LessThan(version, "1.34")
default:
return errdefs.InvalidParameter(errors.Errorf("invalid condition: %q", v))
}
}
}
waitC, err := s.backend.ContainerWait(ctx, vars["name"], waitCondition)
status, err := s.backend.ContainerWait(vars["name"], -1*time.Second)
if err != nil {
return err
}
w.Header().Set("Content-Type", "application/json")
if !legacyBehaviorPre130 {
// Write response header immediately.
w.WriteHeader(http.StatusOK)
if flusher, ok := w.(http.Flusher); ok {
flusher.Flush()
}
}
// Block on the result of the wait operation.
status := <-waitC
// With API < 1.34, wait on WaitConditionRemoved did not return
// in case container removal failed. The only way to report an
// error back to the client is to not write anything (i.e. send
// an empty response which will be treated as an error).
if legacyRemovalWaitPre134 && status.Err() != nil {
return nil
}
var waitError *container.WaitExitError
if status.Err() != nil {
waitError = &container.WaitExitError{Message: status.Err().Error()}
}
return json.NewEncoder(w).Encode(&container.WaitResponse{
StatusCode: int64(status.ExitCode()),
Error: waitError,
return httputils.WriteJSON(w, http.StatusOK, &container.ContainerWaitOKBody{
StatusCode: int64(status),
})
}
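A client-side sketch of the wait endpoint as reworked above: for API >= 1.30 the daemon writes the response headers immediately and the JSON body only when the wait condition fires, so the client simply blocks on decoding the body. The unix-socket dialing, API version, and container name are assumptions for illustration.

package main

import (
    "context"
    "encoding/json"
    "fmt"
    "net"
    "net/http"
)

type waitResponse struct {
    StatusCode int64 `json:"StatusCode"`
    Error      *struct {
        Message string `json:"Message"`
    } `json:"Error,omitempty"`
}

func main() {
    client := &http.Client{Transport: &http.Transport{
        DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
            return net.Dial("unix", "/var/run/docker.sock")
        },
    }}
    resp, err := client.Post(
        "http://docker/v1.30/containers/mycontainer/wait?condition=next-exit",
        "application/json", nil)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    var wr waitResponse
    // Headers have already arrived; this decode blocks until the
    // container actually exits and the body is written.
    if err := json.NewDecoder(resp.Body).Decode(&wr); err != nil {
        panic(err)
    }
    fmt.Println("exit code:", wr.StatusCode)
}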
@@ -436,26 +329,15 @@ func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.Respon
if err := httputils.ParseForm(r); err != nil {
return err
}
var updateConfig container.UpdateConfig
if err := httputils.ReadJSON(r, &updateConfig); err != nil {
if err := httputils.CheckForJSON(r); err != nil {
return err
}
if versions.LessThan(httputils.VersionFromContext(ctx), "1.40") {
updateConfig.PidsLimit = nil
}
if versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
// Ignore KernelMemory removed in API 1.42.
updateConfig.KernelMemory = 0
}
var updateConfig container.UpdateConfig
if updateConfig.PidsLimit != nil && *updateConfig.PidsLimit <= 0 {
// Both `0` and `-1` are accepted to set "unlimited" when updating.
// Historically, any negative value was accepted, so treat them as
// "unlimited" as well.
var unlimited int64
updateConfig.PidsLimit = &unlimited
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&updateConfig); err != nil {
return err
}
hostConfig := &container.HostConfig{
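A small sketch of the PidsLimit normalization introduced above: 0, -1, and historically any negative value all mean "unlimited", which the update path stores as an explicit zero (a nil pointer means "leave the current limit unchanged").

package main

import "fmt"

func normalizePidsLimit(limit *int64) *int64 {
    if limit != nil && *limit <= 0 {
        var unlimited int64 // 0 == unlimited
        return &unlimited
    }
    return limit
}

func main() {
    neg := int64(-5)
    set := int64(100)
    fmt.Println(*normalizePidsLimit(&neg)) // 0
    fmt.Println(*normalizePidsLimit(&set)) // 100
    fmt.Println(normalizePidsLimit(nil))   // <nil>: no change requested
}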
@@ -494,97 +376,12 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
hostConfig.AutoRemove = false
}
if hostConfig != nil && versions.LessThan(version, "1.40") {
// Ignore BindOptions.NonRecursive because it was added in API 1.40.
for _, m := range hostConfig.Mounts {
if bo := m.BindOptions; bo != nil {
bo.NonRecursive = false
}
}
// Ignore KernelMemoryTCP because it was added in API 1.40.
hostConfig.KernelMemoryTCP = 0
// Older clients (API < 1.40) expect the default to be shareable; make them happy
if hostConfig.IpcMode.IsEmpty() {
hostConfig.IpcMode = container.IPCModeShareable
}
}
if hostConfig != nil && versions.LessThan(version, "1.41") && !s.cgroup2 {
// Older clients expect the default to be "host" on cgroup v1 hosts
if hostConfig.CgroupnsMode.IsEmpty() {
hostConfig.CgroupnsMode = container.CgroupnsModeHost
}
}
if hostConfig != nil && versions.LessThan(version, "1.42") {
for _, m := range hostConfig.Mounts {
// Ignore BindOptions.CreateMountpoint because it was added in API 1.42.
if bo := m.BindOptions; bo != nil {
bo.CreateMountpoint = false
}
// These combinations are invalid, but weren't validated in API < 1.42.
// We reset them here, so that validation doesn't produce an error.
if o := m.VolumeOptions; o != nil && m.Type != mount.TypeVolume {
m.VolumeOptions = nil
}
if o := m.TmpfsOptions; o != nil && m.Type != mount.TypeTmpfs {
m.TmpfsOptions = nil
}
if bo := m.BindOptions; bo != nil {
// Ignore BindOptions.CreateMountpoint because it was added in API 1.42.
bo.CreateMountpoint = false
}
}
}
if hostConfig != nil && versions.GreaterThanOrEqualTo(version, "1.42") {
// Ignore KernelMemory removed in API 1.42.
hostConfig.KernelMemory = 0
for _, m := range hostConfig.Mounts {
if o := m.VolumeOptions; o != nil && m.Type != mount.TypeVolume {
return errdefs.InvalidParameter(fmt.Errorf("VolumeOptions must not be specified on mount type %q", m.Type))
}
if o := m.BindOptions; o != nil && m.Type != mount.TypeBind {
return errdefs.InvalidParameter(fmt.Errorf("BindOptions must not be specified on mount type %q", m.Type))
}
if o := m.TmpfsOptions; o != nil && m.Type != mount.TypeTmpfs {
return errdefs.InvalidParameter(fmt.Errorf("TmpfsOptions must not be specified on mount type %q", m.Type))
}
}
}
if hostConfig != nil && runtime.GOOS == "linux" && versions.LessThan(version, "1.42") {
// ConsoleSize is not respected by Linux daemon before API 1.42
hostConfig.ConsoleSize = [2]uint{0, 0}
}
var platform *specs.Platform
if versions.GreaterThanOrEqualTo(version, "1.41") {
if v := r.Form.Get("platform"); v != "" {
p, err := platforms.Parse(v)
if err != nil {
return errdefs.InvalidParameter(err)
}
platform = &p
}
}
if hostConfig != nil && hostConfig.PidsLimit != nil && *hostConfig.PidsLimit <= 0 {
// Don't set a limit if either no limit was specified, or "unlimited" was
// explicitly set.
// Both `0` and `-1` are accepted as "unlimited", and historically any
// negative value was accepted, so treat those as "unlimited" as well.
hostConfig.PidsLimit = nil
}
ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{
Name: name,
Config: config,
HostConfig: hostConfig,
NetworkingConfig: networkingConfig,
AdjustCPUShares: adjustCPUShares,
Platform: platform,
})
if err != nil {
return err
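A stdlib-only sketch of the "platform" query parameter handling added for API 1.41 above. The real code delegates to containerd's platforms.Parse; this simplified stand-in only splits an os/arch[/variant] specifier and fills in host defaults.

package main

import (
    "fmt"
    "runtime"
    "strings"
)

type platform struct {
    OS, Architecture, Variant string
}

func parsePlatform(spec string) (platform, error) {
    // Default missing fields to the host, as a convenience.
    p := platform{OS: runtime.GOOS, Architecture: runtime.GOARCH}
    parts := strings.Split(spec, "/")
    switch len(parts) {
    case 1:
        p.OS = parts[0]
    case 3:
        p.Variant = parts[2]
        fallthrough
    case 2:
        p.OS, p.Architecture = parts[0], parts[1]
    default:
        return platform{}, fmt.Errorf("invalid platform: %q", spec)
    }
    return p, nil
}

func main() {
    p, err := parsePlatform("linux/arm64/v8")
    fmt.Printf("%+v %v\n", p, err)
}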
@@ -621,11 +418,11 @@ func (s *containerRouter) postContainersResize(ctx context.Context, w http.Respo
height, err := strconv.Atoi(r.Form.Get("h"))
if err != nil {
return errdefs.InvalidParameter(err)
return err
}
width, err := strconv.Atoi(r.Form.Get("w"))
if err != nil {
return errdefs.InvalidParameter(err)
return err
}
return s.backend.ContainerResize(vars["name"], height, width)
@@ -643,11 +440,10 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
hijacker, ok := w.(http.Hijacker)
if !ok {
return errdefs.InvalidParameter(errors.Errorf("error attaching to container %s, hijack connection missing", containerName))
return fmt.Errorf("error attaching to container %s, hijack connection missing", containerName)
}
contentType := types.MediaTypeRawStream
setupStreams := func(multiplexed bool) (io.ReadCloser, io.Writer, io.Writer, error) {
setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) {
conn, _, err := hijacker.Hijack()
if err != nil {
return nil, nil, nil, err
@@ -657,10 +453,7 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
conn.Write([]byte{})
if upgrade {
if multiplexed && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
contentType = types.MediaTypeMultiplexedStream
}
fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: "+contentType+"\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
} else {
fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
}
@@ -684,16 +477,16 @@ func (s *containerRouter) postContainersAttach(ctx context.Context, w http.Respo
}
if err = s.backend.ContainerAttach(containerName, attachConfig); err != nil {
logrus.WithError(err).Errorf("Handler for %s %s returned error", r.Method, r.URL.Path)
logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err)
// Remember to close stream if error happens
conn, _, errHijack := hijacker.Hijack()
if errHijack != nil {
logrus.WithError(err).Errorf("Handler for %s %s: unable to close stream; error when hijacking connection", r.Method, r.URL.Path)
} else {
statusCode := httpstatus.FromError(err)
if errHijack == nil {
statusCode := httputils.GetHTTPErrorStatusCode(err)
statusText := http.StatusText(statusCode)
fmt.Fprintf(conn, "HTTP/1.1 %d %s\r\nContent-Type: %s\r\n\r\n%s\r\n", statusCode, statusText, contentType, err.Error())
fmt.Fprintf(conn, "HTTP/1.1 %d %s\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n%s\r\n", statusCode, statusText, err.Error())
httputils.CloseStreams(conn)
} else {
logrus.Errorf("Error Hijacking: %v", err)
}
}
return nil
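A stdlib-only sketch of the hijack handshake used above: once the client sends Connection: Upgrade, the handler takes the TCP connection away from net/http, writes the raw 101 UPGRADED response itself, and streams freely from then on. Framing (raw vs. multiplexed) is the handler's responsibility after that point.

package main

import (
    "fmt"
    "net/http"
)

func attach(w http.ResponseWriter, r *http.Request) {
    hijacker, ok := w.(http.Hijacker)
    if !ok {
        http.Error(w, "hijacking not supported", http.StatusInternalServerError)
        return
    }
    conn, buf, err := hijacker.Hijack()
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    defer conn.Close()

    // Hand-written response line and headers; net/http is out of the picture.
    fmt.Fprintf(buf, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n")
    fmt.Fprintf(buf, "raw bytes flow here\r\n")
    buf.Flush()
}

func main() {
    http.HandleFunc("/attach", attach)
    _ = http.ListenAndServe(":8080", nil)
}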
@@ -713,7 +506,7 @@ func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.Respons
version := httputils.VersionFromContext(ctx)
setupStreams := func(multiplexed bool) (io.ReadCloser, io.Writer, io.Writer, error) {
setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) {
wsChan := make(chan *websocket.Conn)
h := func(conn *websocket.Conn) {
wsChan <- conn
@@ -735,33 +528,22 @@ func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.Respons
return conn, conn, conn, nil
}
useStdin, useStdout, useStderr := true, true, true
if versions.GreaterThanOrEqualTo(version, "1.42") {
useStdin = httputils.BoolValue(r, "stdin")
useStdout = httputils.BoolValue(r, "stdout")
useStderr = httputils.BoolValue(r, "stderr")
}
attachConfig := &backend.ContainerAttachConfig{
GetStreams: setupStreams,
UseStdin: useStdin,
UseStdout: useStdout,
UseStderr: useStderr,
Logs: httputils.BoolValue(r, "logs"),
Stream: httputils.BoolValue(r, "stream"),
DetachKeys: detachKeys,
MuxStreams: false, // never multiplex, as we rely on websocket to manage distinct streams
UseStdin: true,
UseStdout: true,
UseStderr: true,
MuxStreams: false, // TODO: this should be true since it's a single stream for both stdout and stderr
}
err = s.backend.ContainerAttach(containerName, attachConfig)
close(done)
select {
case <-started:
if err != nil {
logrus.Errorf("Error attaching websocket: %s", err)
} else {
logrus.Debug("websocket connection was closed by client")
}
logrus.Errorf("Error attaching websocket: %s", err)
return nil
default:
}
@@ -773,12 +555,12 @@ func (s *containerRouter) postContainersPrune(ctx context.Context, w http.Respon
return err
}
pruneFilters, err := filters.FromJSON(r.Form.Get("filters"))
pruneFilters, err := filters.FromParam(r.Form.Get("filters"))
if err != nil {
return err
}
pruneReport, err := s.backend.ContainersPrune(ctx, pruneFilters)
pruneReport, err := s.backend.ContainersPrune(pruneFilters)
if err != nil {
return err
}
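A stdlib-only sketch of the wire format parsed by filters.FromJSON / filters.FromParam above: the "filters" query parameter carries a JSON object mapping each filter name to its set of accepted values (the newer map-of-sets encoding shown here; FromParam also accepted a legacy map-of-lists form).

package main

import (
    "encoding/json"
    "fmt"
    "net/url"
)

func main() {
    filters := map[string]map[string]bool{
        "status": {"exited": true},
        "label":  {"env=prod": true},
    }
    raw, _ := json.Marshal(filters)

    q := url.Values{}
    q.Set("filters", string(raw))
    // e.g. /containers/prune?filters=%7B%22label%22%3A...
    fmt.Println("/containers/prune?" + q.Encode())
}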


@@ -1,49 +1,50 @@
package container // import "github.com/docker/docker/api/server/router/container"
package container
import (
"compress/flate"
"compress/gzip"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"strings"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
gddohttputil "github.com/golang/gddo/httputil"
"golang.org/x/net/context"
)
type pathError struct{}
func (pathError) Error() string {
return "Path cannot be empty"
}
func (pathError) InvalidParameter() {}
// postContainersCopy is deprecated in favor of getContainersArchive.
//
// Deprecated since 1.8 (API v1.20), errors out since 1.12 (API v1.24)
func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
// Deprecated since 1.8, Errors out since 1.12
version := httputils.VersionFromContext(ctx)
if versions.GreaterThanOrEqualTo(version, "1.24") {
w.WriteHeader(http.StatusNotFound)
return nil
}
if err := httputils.CheckForJSON(r); err != nil {
return err
}
cfg := types.CopyConfig{}
if err := httputils.ReadJSON(r, &cfg); err != nil {
if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil {
return err
}
if cfg.Resource == "" {
return pathError{}
return fmt.Errorf("Path cannot be empty")
}
data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource)
if err != nil {
if strings.Contains(strings.ToLower(err.Error()), "no such container") {
w.WriteHeader(http.StatusNotFound)
return nil
}
if os.IsNotExist(err) {
return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"])
}
return err
}
defer data.Close()
@@ -82,29 +83,6 @@ func (s *containerRouter) headContainersArchive(ctx context.Context, w http.Resp
return setContainerPathStatHeader(stat, w.Header())
}
func writeCompressedResponse(w http.ResponseWriter, r *http.Request, body io.Reader) error {
var cw io.Writer
switch gddohttputil.NegotiateContentEncoding(r, []string{"gzip", "deflate"}) {
case "gzip":
gw := gzip.NewWriter(w)
defer gw.Close()
cw = gw
w.Header().Set("Content-Encoding", "gzip")
case "deflate":
fw, err := flate.NewWriter(w, flate.DefaultCompression)
if err != nil {
return err
}
defer fw.Close()
cw = fw
w.Header().Set("Content-Encoding", "deflate")
default:
cw = w
}
_, err := io.Copy(cw, body)
return err
}
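A stdlib-only sketch of writeCompressedResponse above, with the content negotiation reduced to a naive Accept-Encoding substring check (the real code uses gddo's NegotiateContentEncoding, which also honors q-values).

package main

import (
    "compress/flate"
    "compress/gzip"
    "io"
    "net/http"
    "strings"
)

func writeCompressed(w http.ResponseWriter, r *http.Request, body io.Reader) error {
    var cw io.Writer = w
    accept := r.Header.Get("Accept-Encoding")
    switch {
    case strings.Contains(accept, "gzip"):
        gw := gzip.NewWriter(w)
        defer gw.Close()
        w.Header().Set("Content-Encoding", "gzip")
        cw = gw
    case strings.Contains(accept, "deflate"):
        fw, err := flate.NewWriter(w, flate.DefaultCompression)
        if err != nil {
            return err
        }
        defer fw.Close()
        w.Header().Set("Content-Encoding", "deflate")
        cw = fw
    }
    _, err := io.Copy(cw, body)
    return err
}

func main() {
    http.HandleFunc("/archive", func(w http.ResponseWriter, r *http.Request) {
        w.Header().Set("Content-Type", "application/x-tar")
        _ = writeCompressed(w, r, strings.NewReader("tar bytes here"))
    })
    _ = http.ListenAndServe(":8080", nil)
}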
func (s *containerRouter) getContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
v, err := httputils.ArchiveFormValues(r, vars)
if err != nil {
@@ -122,7 +100,9 @@ func (s *containerRouter) getContainersArchive(ctx context.Context, w http.Respo
}
w.Header().Set("Content-Type", "application/x-tar")
return writeCompressedResponse(w, r, tarArchive)
_, err = io.Copy(w, tarArchive)
return err
}
func (s *containerRouter) putContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -132,7 +112,5 @@ func (s *containerRouter) putContainersArchive(ctx context.Context, w http.Respo
}
noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir")
copyUIDGID := httputils.BoolValue(r, "copyUIDGID")
return s.backend.ContainerExtractToDir(v.Name, v.Path, copyUIDGID, noOverwriteDirNonDir, r.Body)
return s.backend.ContainerExtractToDir(v.Name, v.Path, noOverwriteDirNonDir, r.Body)
}


@@ -1,19 +1,18 @@
package container // import "github.com/docker/docker/api/server/router/container"
package container
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/stdcopy"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
)
func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -25,38 +24,28 @@ func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter
return httputils.WriteJSON(w, http.StatusOK, eConfig)
}
type execCommandError struct{}
func (execCommandError) Error() string {
return "No exec command specified"
}
func (execCommandError) InvalidParameter() {}
func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
if err := httputils.CheckForJSON(r); err != nil {
return err
}
name := vars["name"]
execConfig := &types.ExecConfig{}
if err := httputils.ReadJSON(r, execConfig); err != nil {
if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil {
return err
}
if len(execConfig.Cmd) == 0 {
return execCommandError{}
}
version := httputils.VersionFromContext(ctx)
if versions.LessThan(version, "1.42") {
// Not supported by API versions before 1.42
execConfig.ConsoleSize = nil
return fmt.Errorf("No exec command specified")
}
// Register an instance of Exec in container.
id, err := s.backend.ContainerExecCreate(vars["name"], execConfig)
id, err := s.backend.ContainerExecCreate(name, execConfig)
if err != nil {
logrus.Errorf("Error setting up exec command in container %s: %v", vars["name"], err)
logrus.Errorf("Error setting up exec command in container %s: %v", name, err)
return err
}
@@ -72,11 +61,9 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
}
version := httputils.VersionFromContext(ctx)
if versions.LessThan(version, "1.22") {
// API versions before 1.22 did not enforce application/json content-type.
// Allow older clients to work by patching the content-type.
if r.Header.Get("Content-Type") != "application/json" {
r.Header.Set("Content-Type", "application/json")
if versions.GreaterThan(version, "1.21") {
if err := httputils.CheckForJSON(r); err != nil {
return err
}
}
@@ -87,7 +74,7 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
)
execStartCheck := &types.ExecStartCheck{}
if err := httputils.ReadJSON(r, execStartCheck); err != nil {
if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil {
return err
}
@@ -95,18 +82,6 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
return err
}
if execStartCheck.ConsoleSize != nil {
// Not supported before 1.42
if versions.LessThan(version, "1.42") {
execStartCheck.ConsoleSize = nil
}
// No console without tty
if !execStartCheck.Tty {
execStartCheck.ConsoleSize = nil
}
}
if !execStartCheck.Detach {
var err error
// Setting up the streaming http interface.
@@ -117,11 +92,7 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
defer httputils.CloseStreams(inStream, outStream)
if _, ok := r.Header["Upgrade"]; ok {
contentType := types.MediaTypeRawStream
if !execStartCheck.Tty && versions.GreaterThanOrEqualTo(httputils.VersionFromContext(ctx), "1.42") {
contentType = types.MediaTypeMultiplexedStream
}
fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: "+contentType+"\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n")
fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n")
} else {
fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n")
}
@@ -140,21 +111,14 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
}
}
options := container.ExecStartOptions{
Stdin: stdin,
Stdout: stdout,
Stderr: stderr,
ConsoleSize: execStartCheck.ConsoleSize,
}
// Now run the user process in container.
// Maybe we should pass ctx here if we're not detaching?
if err := s.backend.ContainerExecStart(context.Background(), execName, options); err != nil {
if err := s.backend.ContainerExecStart(context.Background(), execName, stdin, stdout, stderr); err != nil {
if execStartCheck.Detach {
return err
}
stdout.Write([]byte(err.Error() + "\r\n"))
logrus.Errorf("Error running exec %s in container: %v", execName, err)
logrus.Errorf("Error running exec in container: %v", err)
}
return nil
}
@@ -165,11 +129,11 @@ func (s *containerRouter) postContainerExecResize(ctx context.Context, w http.Re
}
height, err := strconv.Atoi(r.Form.Get("h"))
if err != nil {
return errdefs.InvalidParameter(err)
return err
}
width, err := strconv.Atoi(r.Form.Get("w"))
if err != nil {
return errdefs.InvalidParameter(err)
return err
}
return s.backend.ContainerExecResize(vars["name"], height, width)


@@ -1,10 +1,10 @@
package container // import "github.com/docker/docker/api/server/router/container"
package container
import (
"context"
"net/http"
"github.com/docker/docker/api/server/httputils"
"golang.org/x/net/context"
)
// getContainersByName inspects container's configuration and serializes it as json.


@@ -1,53 +0,0 @@
package debug // import "github.com/docker/docker/api/server/router/debug"
import (
"context"
"expvar"
"net/http"
"net/http/pprof"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/server/router"
)
// NewRouter creates a new debug router
// The debug router holds endpoints for debugging the daemon, such as those for pprof.
func NewRouter() router.Router {
r := &debugRouter{}
r.initRoutes()
return r
}
type debugRouter struct {
routes []router.Route
}
func (r *debugRouter) initRoutes() {
r.routes = []router.Route{
router.NewGetRoute("/vars", frameworkAdaptHandler(expvar.Handler())),
router.NewGetRoute("/pprof/", frameworkAdaptHandlerFunc(pprof.Index)),
router.NewGetRoute("/pprof/cmdline", frameworkAdaptHandlerFunc(pprof.Cmdline)),
router.NewGetRoute("/pprof/profile", frameworkAdaptHandlerFunc(pprof.Profile)),
router.NewGetRoute("/pprof/symbol", frameworkAdaptHandlerFunc(pprof.Symbol)),
router.NewGetRoute("/pprof/trace", frameworkAdaptHandlerFunc(pprof.Trace)),
router.NewGetRoute("/pprof/{name}", handlePprof),
}
}
func (r *debugRouter) Routes() []router.Route {
return r.routes
}
func frameworkAdaptHandler(handler http.Handler) httputils.APIFunc {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
handler.ServeHTTP(w, r)
return nil
}
}
func frameworkAdaptHandlerFunc(handler http.HandlerFunc) httputils.APIFunc {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
handler(w, r)
return nil
}
}
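For comparison, the same debug endpoints can be mounted on a plain ServeMux straight from the standard library; the adapters above only bridge these handlers into the daemon's APIFunc signature. The listen address is an assumption for illustration.

package main

import (
    "expvar"
    "net/http"
    "net/http/pprof"
)

func main() {
    mux := http.NewServeMux()
    mux.Handle("/debug/vars", expvar.Handler())
    mux.HandleFunc("/debug/pprof/", pprof.Index)
    mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
    mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
    mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
    mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
    // e.g. go tool pprof http://localhost:6060/debug/pprof/profile
    _ = http.ListenAndServe("localhost:6060", mux)
}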


@@ -1,12 +0,0 @@
package debug // import "github.com/docker/docker/api/server/router/debug"
import (
"context"
"net/http"
"net/http/pprof"
)
func handlePprof(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
pprof.Handler(vars["name"]).ServeHTTP(w, r)
return nil
}


@@ -1,15 +0,0 @@
package distribution // import "github.com/docker/docker/api/server/router/distribution"
import (
"context"
"github.com/docker/distribution"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
)
// Backend is all the methods that need to be implemented
// to provide image specific functionality.
type Backend interface {
GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, error)
}


@@ -1,31 +0,0 @@
package distribution // import "github.com/docker/docker/api/server/router/distribution"
import "github.com/docker/docker/api/server/router"
// distributionRouter is a router to talk with the registry
type distributionRouter struct {
backend Backend
routes []router.Route
}
// NewRouter initializes a new distribution router
func NewRouter(backend Backend) router.Router {
r := &distributionRouter{
backend: backend,
}
r.initRoutes()
return r
}
// Routes returns the available routes
func (r *distributionRouter) Routes() []router.Route {
return r.routes
}
// initRoutes initializes the routes in the distribution router
func (r *distributionRouter) initRoutes() {
r.routes = []router.Route{
// GET
router.NewGetRoute("/distribution/{name:.*}/json", r.getDistributionInfo),
}
}


@@ -1,150 +0,0 @@
package distribution // import "github.com/docker/docker/api/server/router/distribution"
import (
"context"
"encoding/base64"
"encoding/json"
"net/http"
"strings"
"github.com/docker/distribution/manifest/manifestlist"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/errdefs"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
w.Header().Set("Content-Type", "application/json")
var (
config = &types.AuthConfig{}
authEncoded = r.Header.Get("X-Registry-Auth")
distributionInspect registrytypes.DistributionInspect
)
if authEncoded != "" {
authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJSON).Decode(&config); err != nil {
// for a search it is not an error if no auth was given;
// to increase compatibility with the existing API it defaults to empty
config = &types.AuthConfig{}
}
}
image := vars["name"]
// TODO why is reference.ParseAnyReference() / reference.ParseNormalizedNamed() not using the reference.ErrTagInvalidFormat (and so on) errors?
ref, err := reference.ParseAnyReference(image)
if err != nil {
return errdefs.InvalidParameter(err)
}
namedRef, ok := ref.(reference.Named)
if !ok {
if _, ok := ref.(reference.Digested); ok {
// full image ID
return errors.Errorf("no manifest found for full image ID")
}
return errdefs.InvalidParameter(errors.Errorf("unknown image reference format: %s", image))
}
distrepo, err := s.backend.GetRepository(ctx, namedRef, config)
if err != nil {
return err
}
blobsrvc := distrepo.Blobs(ctx)
if canonicalRef, ok := namedRef.(reference.Canonical); !ok {
namedRef = reference.TagNameOnly(namedRef)
taggedRef, ok := namedRef.(reference.NamedTagged)
if !ok {
return errdefs.InvalidParameter(errors.Errorf("image reference not tagged: %s", image))
}
descriptor, err := distrepo.Tags(ctx).Get(ctx, taggedRef.Tag())
if err != nil {
return err
}
distributionInspect.Descriptor = v1.Descriptor{
MediaType: descriptor.MediaType,
Digest: descriptor.Digest,
Size: descriptor.Size,
}
} else {
// TODO(nishanttotla): Once manifests can be looked up as a blob, the
// descriptor should be set using blobsrvc.Stat(ctx, canonicalRef.Digest())
// instead of having to manually fill in the fields
distributionInspect.Descriptor.Digest = canonicalRef.Digest()
}
// we have a digest, so we can retrieve the manifest
mnfstsrvc, err := distrepo.Manifests(ctx)
if err != nil {
return err
}
mnfst, err := mnfstsrvc.Get(ctx, distributionInspect.Descriptor.Digest)
if err != nil {
switch err {
case reference.ErrReferenceInvalidFormat,
reference.ErrTagInvalidFormat,
reference.ErrDigestInvalidFormat,
reference.ErrNameContainsUppercase,
reference.ErrNameEmpty,
reference.ErrNameTooLong,
reference.ErrNameNotCanonical:
return errdefs.InvalidParameter(err)
}
return err
}
mediaType, payload, err := mnfst.Payload()
if err != nil {
return err
}
// update MediaType because registry might return something incorrect
distributionInspect.Descriptor.MediaType = mediaType
if distributionInspect.Descriptor.Size == 0 {
distributionInspect.Descriptor.Size = int64(len(payload))
}
// retrieve platform information depending on the type of manifest
switch mnfstObj := mnfst.(type) {
case *manifestlist.DeserializedManifestList:
for _, m := range mnfstObj.Manifests {
distributionInspect.Platforms = append(distributionInspect.Platforms, v1.Platform{
Architecture: m.Platform.Architecture,
OS: m.Platform.OS,
OSVersion: m.Platform.OSVersion,
OSFeatures: m.Platform.OSFeatures,
Variant: m.Platform.Variant,
})
}
case *schema2.DeserializedManifest:
configJSON, err := blobsrvc.Get(ctx, mnfstObj.Config.Digest)
var platform v1.Platform
if err == nil {
err := json.Unmarshal(configJSON, &platform)
if err == nil && (platform.OS != "" || platform.Architecture != "") {
distributionInspect.Platforms = append(distributionInspect.Platforms, platform)
}
}
case *schema1.SignedManifest:
platform := v1.Platform{
Architecture: mnfstObj.Architecture,
OS: "linux",
}
distributionInspect.Platforms = append(distributionInspect.Platforms, platform)
}
return httputils.WriteJSON(w, http.StatusOK, distributionInspect)
}
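A stdlib-only sketch of the X-Registry-Auth handling at the top of this handler: the header is a base64url-encoded JSON credential blob, and a decode failure deliberately degrades to empty (anonymous) credentials rather than failing the request.

package main

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
    "strings"
)

type authConfig struct {
    Username      string `json:"username,omitempty"`
    Password      string `json:"password,omitempty"`
    RegistryToken string `json:"registrytoken,omitempty"`
}

func decodeRegistryAuth(header string) *authConfig {
    config := &authConfig{}
    if header == "" {
        return config
    }
    dec := base64.NewDecoder(base64.URLEncoding, strings.NewReader(header))
    if err := json.NewDecoder(dec).Decode(config); err != nil {
        return &authConfig{} // tolerate a bad header: behave as anonymous
    }
    return config
}

func main() {
    blob := base64.URLEncoding.EncodeToString([]byte(`{"username":"me","password":"secret"}`))
    fmt.Printf("%+v\n", *decodeRegistryAuth(blob))
}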


@@ -1,12 +1,19 @@
package router // import "github.com/docker/docker/api/server/router"
package router
import (
"context"
"errors"
"net/http"
"golang.org/x/net/context"
apierrors "github.com/docker/docker/api/errors"
"github.com/docker/docker/api/server/httputils"
)
var (
errExperimentalFeature = errors.New("This experimental feature is disabled by default. Start the Docker daemon with --experimental in order to enable it.")
)
// ExperimentalRoute defines an experimental API route that can be enabled or disabled.
type ExperimentalRoute interface {
Route
@@ -32,19 +39,11 @@ func (r *experimentalRoute) Disable() {
r.handler = experimentalHandler
}
type notImplementedError struct{}
func (notImplementedError) Error() string {
return "This experimental feature is disabled by default. Start the Docker daemon in experimental mode in order to enable it."
}
func (notImplementedError) NotImplemented() {}
func experimentalHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
return notImplementedError{}
return apierrors.NewErrorWithStatusCode(errExperimentalFeature, http.StatusNotImplemented)
}
// Handler returns the APIFunc to let the server wrap it in middlewares.
// Handler returns returns the APIFunc to let the server wrap it in middlewares.
func (r *experimentalRoute) Handler() httputils.APIFunc {
return r.handler
}


@@ -1,8 +0,0 @@
package grpc // import "github.com/docker/docker/api/server/router/grpc"
import "google.golang.org/grpc"
// Backend abstracts a registerable GRPC service.
type Backend interface {
RegisterGRPC(*grpc.Server)
}


@@ -1,41 +0,0 @@
package grpc // import "github.com/docker/docker/api/server/router/grpc"
import (
"github.com/docker/docker/api/server/router"
"github.com/moby/buildkit/util/grpcerrors"
"golang.org/x/net/http2"
"google.golang.org/grpc"
)
type grpcRouter struct {
routes []router.Route
grpcServer *grpc.Server
h2Server *http2.Server
}
// NewRouter initializes a new grpc http router
func NewRouter(backends ...Backend) router.Router {
r := &grpcRouter{
h2Server: &http2.Server{},
grpcServer: grpc.NewServer(
grpc.UnaryInterceptor(grpcerrors.UnaryServerInterceptor),
grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor),
),
}
for _, b := range backends {
b.RegisterGRPC(r.grpcServer)
}
r.initRoutes()
return r
}
// Routes returns the available routes to the session controller
func (gr *grpcRouter) Routes() []router.Route {
return gr.routes
}
func (gr *grpcRouter) initRoutes() {
gr.routes = []router.Route{
router.NewPostRoute("/grpc", gr.serveGRPC),
}
}


@@ -1,45 +0,0 @@
package grpc // import "github.com/docker/docker/api/server/router/grpc"
import (
"context"
"net/http"
"github.com/pkg/errors"
"golang.org/x/net/http2"
)
func (gr *grpcRouter) serveGRPC(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
h, ok := w.(http.Hijacker)
if !ok {
return errors.New("handler does not support hijack")
}
proto := r.Header.Get("Upgrade")
if proto == "" {
return errors.New("no upgrade proto in request")
}
if proto != "h2c" {
return errors.Errorf("protocol %s not supported", proto)
}
conn, _, err := h.Hijack()
if err != nil {
return err
}
resp := &http.Response{
StatusCode: http.StatusSwitchingProtocols,
ProtoMajor: 1,
ProtoMinor: 1,
Header: http.Header{},
}
resp.Header.Set("Connection", "Upgrade")
resp.Header.Set("Upgrade", proto)
// set raw mode
conn.Write([]byte{})
resp.Write(conn)
// https://godoc.org/golang.org/x/net/http2#Server.ServeConn
// TODO: is it a problem that conn has already been written to?
gr.h2Server.ServeConn(conn, &http2.ServeConnOpts{Handler: gr.grpcServer})
return nil
}
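The core move of serveGRPC above is handing the hijacked connection to an HTTP/2 server. A minimal sketch using golang.org/x/net/http2 directly, serving prior-knowledge h2c on raw TCP; the listen address and handler are assumptions for illustration.

package main

import (
    "fmt"
    "net"
    "net/http"

    "golang.org/x/net/http2"
)

func main() {
    h2 := &http2.Server{}
    handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprintln(w, "served over h2c")
    })

    ln, err := net.Listen("tcp", "localhost:9090")
    if err != nil {
        panic(err)
    }
    for {
        conn, err := ln.Accept()
        if err != nil {
            panic(err)
        }
        // Start the h2 handshake on the raw connection, as the grpc
        // router does after its Upgrade handshake.
        go h2.ServeConn(conn, &http2.ServeConnOpts{Handler: handler})
    }
}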


@@ -1,42 +1,46 @@
package image // import "github.com/docker/docker/api/server/router/image"
package image
import (
"context"
"io"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/registry"
dockerimage "github.com/docker/docker/image"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/net/context"
)
// Backend is all the methods that need to be implemented
// to provide image specific functionality.
type Backend interface {
containerBackend
imageBackend
importExportBackend
registryBackend
}
type containerBackend interface {
Commit(name string, config *backend.ContainerCommitConfig) (imageID string, err error)
}
type imageBackend interface {
ImageDelete(imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error)
ImageHistory(imageName string) ([]*image.HistoryResponseItem, error)
Images(ctx context.Context, opts types.ImageListOptions) ([]*types.ImageSummary, error)
GetImage(refOrID string, platform *specs.Platform) (retImg *dockerimage.Image, retErr error)
TagImage(imageName, repository, tag string) (string, error)
ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error)
Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error)
LookupImage(name string) (*types.ImageInspect, error)
TagImage(imageName, repository, tag string) error
ImagesPrune(pruneFilters filters.Args) (*types.ImagesPruneReport, error)
}
type importExportBackend interface {
LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error
ImportImage(src string, repository string, platform *specs.Platform, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error
ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error
ExportImage(names []string, outStream io.Writer) error
}
type registryBackend interface {
PullImage(ctx context.Context, image, tag string, platform *specs.Platform, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
SearchRegistryForImages(ctx context.Context, searchFilters filters.Args, term string, limit int, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error)
SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error)
}


@@ -1,28 +1,22 @@
package image // import "github.com/docker/docker/api/server/router/image"
package image
import (
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/reference"
)
// imageRouter is a router to talk with the image controller
type imageRouter struct {
backend Backend
referenceBackend reference.Store
imageStore image.Store
layerStore layer.Store
routes []router.Route
backend Backend
decoder httputils.ContainerDecoder
routes []router.Route
}
// NewRouter initializes a new image router
func NewRouter(backend Backend, referenceBackend reference.Store, imageStore image.Store, layerStore layer.Store) router.Router {
func NewRouter(backend Backend, decoder httputils.ContainerDecoder) router.Router {
r := &imageRouter{
backend: backend,
referenceBackend: referenceBackend,
imageStore: imageStore,
layerStore: layerStore,
backend: backend,
decoder: decoder,
}
r.initRoutes()
return r
@@ -44,9 +38,10 @@ func (r *imageRouter) initRoutes() {
router.NewGetRoute("/images/{name:.*}/history", r.getImagesHistory),
router.NewGetRoute("/images/{name:.*}/json", r.getImagesByName),
// POST
router.NewPostRoute("/commit", r.postCommit),
router.NewPostRoute("/images/load", r.postImagesLoad),
router.NewPostRoute("/images/create", r.postImagesCreate),
router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush),
router.Cancellable(router.NewPostRoute("/images/create", r.postImagesCreate)),
router.Cancellable(router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush)),
router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag),
router.NewPostRoute("/images/prune", r.postImagesPrune),
// DELETE


@@ -1,61 +1,93 @@
package image // import "github.com/docker/docker/api/server/router/image"
package image
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/containerd/containerd/platforms"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/streamformatter"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/docker/docker/registry"
"golang.org/x/net/context"
)
func (s *imageRouter) postCommit(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
if err := httputils.CheckForJSON(r); err != nil {
return err
}
cname := r.Form.Get("container")
pause := httputils.BoolValue(r, "pause")
version := httputils.VersionFromContext(ctx)
if r.FormValue("pause") == "" && versions.GreaterThanOrEqualTo(version, "1.13") {
pause = true
}
c, _, _, err := s.decoder.DecodeConfig(r.Body)
if err != nil && err != io.EOF { // Do not fail if body is empty.
return err
}
if c == nil {
c = &container.Config{}
}
commitCfg := &backend.ContainerCommitConfig{
ContainerCommitConfig: types.ContainerCommitConfig{
Pause: pause,
Repo: r.Form.Get("repo"),
Tag: r.Form.Get("tag"),
Author: r.Form.Get("author"),
Comment: r.Form.Get("comment"),
Config: c,
MergeConfigs: true,
},
Changes: r.Form["changes"],
}
imgID, err := s.backend.Commit(cname, commitCfg)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusCreated, &types.IDResponse{
ID: string(imgID),
})
}
// Creates an image from Pull or from Import
func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
var (
image = r.Form.Get("fromImage")
repo = r.Form.Get("repo")
tag = r.Form.Get("tag")
message = r.Form.Get("message")
progressErr error
output = ioutils.NewWriteFlusher(w)
platform *specs.Platform
image = r.Form.Get("fromImage")
repo = r.Form.Get("repo")
tag = r.Form.Get("tag")
message = r.Form.Get("message")
err error
output = ioutils.NewWriteFlusher(w)
)
defer output.Close()
w.Header().Set("Content-Type", "application/json")
version := httputils.VersionFromContext(ctx)
if versions.GreaterThanOrEqualTo(version, "1.32") {
if p := r.FormValue("platform"); p != "" {
sp, err := platforms.Parse(p)
if err != nil {
return err
}
platform = &sp
}
}
if image != "" { // pull
if image != "" { //pull
metaHeaders := map[string][]string{}
for k, v := range r.Header {
if strings.HasPrefix(k, "X-Meta-") {
@@ -73,16 +105,21 @@ func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite
authConfig = &types.AuthConfig{}
}
}
progressErr = s.backend.PullImage(ctx, image, tag, platform, metaHeaders, authConfig, output)
} else { // import
err = s.backend.PullImage(ctx, image, tag, metaHeaders, authConfig, output)
} else { //import
src := r.Form.Get("fromSrc")
progressErr = s.backend.ImportImage(src, repo, platform, tag, message, r.Body, output, r.Form["changes"])
// 'err' MUST NOT be defined within this block, we need any error
// generated from the download to be available to the output
// stream processing below
err = s.backend.ImportImage(src, repo, tag, message, r.Body, output, r.Form["changes"])
}
if progressErr != nil {
if err != nil {
if !output.Flushed() {
return progressErr
return err
}
_, _ = output.Write(streamformatter.FormatError(progressErr))
sf := streamformatter.NewJSONStreamFormatter()
output.Write(sf.FormatError(err))
}
return nil
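A stdlib-only sketch of the in-band error reporting above: once progress output has been flushed, the HTTP status is already 200, so a late pull/import failure has to be written into the JSON stream itself. This mirrors the shape produced by streamformatter's FormatError.

package main

import (
    "encoding/json"
    "fmt"
    "os"
)

type jsonError struct {
    Error       string `json:"error"` // legacy field, still read by old clients
    ErrorDetail struct {
        Message string `json:"message"`
    } `json:"errorDetail"`
}

func formatError(err error) []byte {
    msg := jsonError{Error: err.Error()}
    msg.ErrorDetail.Message = err.Error()
    b, _ := json.Marshal(msg)
    return append(b, '\r', '\n')
}

func main() {
    // Progress lines were already flushed with status 200...
    os.Stdout.Write([]byte(`{"status":"Pulling from library/busybox"}` + "\r\n"))
    // ...so the failure is reported as one more JSON message in the stream.
    os.Stdout.Write(formatError(fmt.Errorf("manifest unknown")))
}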
@@ -111,7 +148,7 @@ func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter,
} else {
// the old format is supported for compatibility if there was no authConfig header
if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil {
return errors.Wrap(errdefs.InvalidParameter(err), "Bad parameters and missing X-Registry-Auth")
return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err)
}
}
@@ -127,7 +164,8 @@ func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter,
if !output.Flushed() {
return err
}
_, _ = output.Write(streamformatter.FormatError(err))
sf := streamformatter.NewJSONStreamFormatter()
output.Write(sf.FormatError(err))
}
return nil
}
@@ -152,7 +190,8 @@ func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r
if !output.Flushed() {
return err
}
_, _ = output.Write(streamformatter.FormatError(err))
sf := streamformatter.NewJSONStreamFormatter()
output.Write(sf.FormatError(err))
}
return nil
}
@@ -168,19 +207,11 @@ func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter,
output := ioutils.NewWriteFlusher(w)
defer output.Close()
if err := s.backend.LoadImage(r.Body, output, quiet); err != nil {
_, _ = output.Write(streamformatter.FormatError(err))
output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err))
}
return nil
}
type missingImageError struct{}
func (missingImageError) Error() string {
return "image name cannot be blank"
}
func (missingImageError) InvalidParameter() {}
func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
@@ -189,7 +220,7 @@ func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r
name := vars["name"]
if strings.TrimSpace(name) == "" {
return missingImageError{}
return fmt.Errorf("image name cannot be blank")
}
force := httputils.BoolValue(r, "force")
@@ -204,12 +235,7 @@ func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r
}
func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
image, err := s.backend.GetImage(vars["name"], nil)
if err != nil {
return err
}
imageInspect, err := s.toImageInspect(image)
imageInspect, err := s.backend.LookupImage(vars["name"])
if err != nil {
return err
}
@@ -217,115 +243,23 @@ func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter
return httputils.WriteJSON(w, http.StatusOK, imageInspect)
}
func (s *imageRouter) toImageInspect(img *image.Image) (*types.ImageInspect, error) {
refs := s.referenceBackend.References(img.ID().Digest())
repoTags := []string{}
repoDigests := []string{}
for _, ref := range refs {
switch ref.(type) {
case reference.NamedTagged:
repoTags = append(repoTags, reference.FamiliarString(ref))
case reference.Canonical:
repoDigests = append(repoDigests, reference.FamiliarString(ref))
}
}
var size int64
var layerMetadata map[string]string
layerID := img.RootFS.ChainID()
if layerID != "" {
l, err := s.layerStore.Get(layerID)
if err != nil {
return nil, err
}
defer layer.ReleaseAndLog(s.layerStore, l)
size = l.Size()
layerMetadata, err = l.Metadata()
if err != nil {
return nil, err
}
}
comment := img.Comment
if len(comment) == 0 && len(img.History) > 0 {
comment = img.History[len(img.History)-1].Comment
}
lastUpdated, err := s.imageStore.GetLastUpdated(img.ID())
if err != nil {
return nil, err
}
return &types.ImageInspect{
ID: img.ID().String(),
RepoTags: repoTags,
RepoDigests: repoDigests,
Parent: img.Parent.String(),
Comment: comment,
Created: img.Created.Format(time.RFC3339Nano),
Container: img.Container,
ContainerConfig: &img.ContainerConfig,
DockerVersion: img.DockerVersion,
Author: img.Author,
Config: img.Config,
Architecture: img.Architecture,
Variant: img.Variant,
Os: img.OperatingSystem(),
OsVersion: img.OSVersion,
Size: size,
VirtualSize: size, // TODO: field unused, deprecate
GraphDriver: types.GraphDriverData{
Name: s.layerStore.DriverName(),
Data: layerMetadata,
},
RootFS: rootFSToAPIType(img.RootFS),
Metadata: types.ImageMetadata{
LastTagTime: lastUpdated,
},
}, nil
}
func rootFSToAPIType(rootfs *image.RootFS) types.RootFS {
var layers []string
for _, l := range rootfs.DiffIDs {
layers = append(layers, l.String())
}
return types.RootFS{
Type: rootfs.Type,
Layers: layers,
}
}
func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
imageFilters, err := filters.FromJSON(r.Form.Get("filters"))
imageFilters, err := filters.FromParam(r.Form.Get("filters"))
if err != nil {
return err
}
version := httputils.VersionFromContext(ctx)
if versions.LessThan(version, "1.41") {
// NOTE: filter is a shell glob string applied to repository names.
filterParam := r.Form.Get("filter")
if filterParam != "" {
imageFilters.Add("reference", filterParam)
}
filterParam := r.Form.Get("filter")
// FIXME(vdemeester) This has been deprecated in 1.13, and is target for removal for v17.12
if filterParam != "" {
imageFilters.Add("reference", filterParam)
}
var sharedSize bool
if versions.GreaterThanOrEqualTo(version, "1.42") {
// NOTE: Support for the "shared-size" parameter was added in API 1.42.
sharedSize = httputils.BoolValue(r, "shared-size")
}
images, err := s.backend.Images(ctx, types.ImageListOptions{
All: httputils.BoolValue(r, "all"),
Filters: imageFilters,
SharedSize: sharedSize,
})
images, err := s.backend.Images(imageFilters, httputils.BoolValue(r, "all"), false)
if err != nil {
return err
}
@@ -347,7 +281,7 @@ func (s *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter,
if err := httputils.ParseForm(r); err != nil {
return err
}
if _, err := s.backend.TagImage(vars["name"], r.Form.Get("repo"), r.Form.Get("tag")); err != nil {
if err := s.backend.TagImage(vars["name"], r.Form.Get("repo"), r.Form.Get("tag")); err != nil {
return err
}
w.WriteHeader(http.StatusCreated)
@@ -377,21 +311,15 @@ func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter
headers[k] = v
}
}
var limit int
limit := registry.DefaultSearchLimit
if r.Form.Get("limit") != "" {
var err error
limit, err = strconv.Atoi(r.Form.Get("limit"))
if err != nil || limit < 0 {
return errdefs.InvalidParameter(errors.Wrap(err, "invalid limit specified"))
limitValue, err := strconv.Atoi(r.Form.Get("limit"))
if err != nil {
return err
}
limit = limitValue
}
searchFilters, err := filters.FromJSON(r.Form.Get("filters"))
if err != nil {
return err
}
query, err := s.backend.SearchRegistryForImages(ctx, searchFilters, r.Form.Get("term"), limit, config, headers)
query, err := s.backend.SearchRegistryForImages(ctx, r.Form.Get("filters"), r.Form.Get("term"), limit, config, headers)
if err != nil {
return err
}
@@ -403,12 +331,12 @@ func (s *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter
return err
}
pruneFilters, err := filters.FromJSON(r.Form.Get("filters"))
pruneFilters, err := filters.FromParam(r.Form.Get("filters"))
if err != nil {
return err
}
pruneReport, err := s.backend.ImagesPrune(ctx, pruneFilters)
pruneReport, err := s.backend.ImagesPrune(pruneFilters)
if err != nil {
return err
}


@@ -1,15 +1,12 @@
package router // import "github.com/docker/docker/api/server/router"
package router
import (
"net/http"
"github.com/docker/docker/api/server/httputils"
"golang.org/x/net/context"
)
// RouteWrapper wraps a route with extra functionality.
// It is passed in when creating a new route.
type RouteWrapper func(r Route) Route
// localRoute defines an individual API route to connect
// with the docker daemon. It implements Route.
type localRoute struct {
@@ -34,40 +31,66 @@ func (l localRoute) Path() string {
}
// NewRoute initializes a new local route for the router.
func NewRoute(method, path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
var r Route = localRoute{method, path, handler}
for _, o := range opts {
r = o(r)
}
return r
func NewRoute(method, path string, handler httputils.APIFunc) Route {
return localRoute{method, path, handler}
}
// NewGetRoute initializes a new route with the http method GET.
func NewGetRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
return NewRoute(http.MethodGet, path, handler, opts...)
func NewGetRoute(path string, handler httputils.APIFunc) Route {
return NewRoute("GET", path, handler)
}
// NewPostRoute initializes a new route with the http method POST.
func NewPostRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
return NewRoute(http.MethodPost, path, handler, opts...)
func NewPostRoute(path string, handler httputils.APIFunc) Route {
return NewRoute("POST", path, handler)
}
// NewPutRoute initializes a new route with the http method PUT.
func NewPutRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
return NewRoute(http.MethodPut, path, handler, opts...)
func NewPutRoute(path string, handler httputils.APIFunc) Route {
return NewRoute("PUT", path, handler)
}
// NewDeleteRoute initializes a new route with the http method DELETE.
func NewDeleteRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
return NewRoute(http.MethodDelete, path, handler, opts...)
func NewDeleteRoute(path string, handler httputils.APIFunc) Route {
return NewRoute("DELETE", path, handler)
}
// NewOptionsRoute initializes a new route with the http method OPTIONS.
func NewOptionsRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
return NewRoute(http.MethodOptions, path, handler, opts...)
func NewOptionsRoute(path string, handler httputils.APIFunc) Route {
return NewRoute("OPTIONS", path, handler)
}
// NewHeadRoute initializes a new route with the http method HEAD.
func NewHeadRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route {
return NewRoute(http.MethodHead, path, handler, opts...)
func NewHeadRoute(path string, handler httputils.APIFunc) Route {
return NewRoute("HEAD", path, handler)
}
func cancellableHandler(h httputils.APIFunc) httputils.APIFunc {
return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if notifier, ok := w.(http.CloseNotifier); ok {
notify := notifier.CloseNotify()
notifyCtx, cancel := context.WithCancel(ctx)
finished := make(chan struct{})
defer close(finished)
ctx = notifyCtx
go func() {
select {
case <-notify:
cancel()
case <-finished:
}
}()
}
return h(ctx, w, r, vars)
}
}
// Cancellable makes new route which embeds http.CloseNotifier feature to
// context.Context of handler.
func Cancellable(r Route) Route {
return localRoute{
method: r.Method(),
path: r.Path(),
handler: cancellableHandler(r.Handler()),
}
}
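http.CloseNotifier, which Cancellable relies on above, was later deprecated in favor of the request context. A sketch of the modern equivalent, where net/http cancels r.Context() automatically when the client disconnects; the route path and timing are assumptions for illustration.

package main

import (
    "fmt"
    "net/http"
    "time"
)

func longPull(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()
    select {
    case <-time.After(30 * time.Second): // stand-in for a long pull/push
        fmt.Fprintln(w, "done")
    case <-ctx.Done():
        // Client went away; abort the backend operation.
        fmt.Println("request cancelled:", ctx.Err())
    }
}

func main() {
    http.HandleFunc("/images/create", longPull)
    _ = http.ListenAndServe(":8080", nil)
}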


@@ -1,32 +1,20 @@
package network // import "github.com/docker/docker/api/server/router/network"
package network
import (
"context"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/libnetwork"
"github.com/docker/libnetwork"
)
// Backend is all the methods that need to be implemented
// to provide network specific functionality.
type Backend interface {
FindNetwork(idName string) (libnetwork.Network, error)
GetNetworks(filters.Args, types.NetworkListConfig) ([]types.NetworkResource, error)
GetNetworks() []libnetwork.Network
CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error)
ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error
DeleteNetwork(networkID string) error
NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error)
}
// ClusterBackend is all the methods that need to be implemented
// to provide cluster network specific functionality.
type ClusterBackend interface {
GetNetworks(filters.Args) ([]types.NetworkResource, error)
GetNetwork(name string) (types.NetworkResource, error)
GetNetworksByName(name string) ([]types.NetworkResource, error)
CreateNetwork(nc types.NetworkCreateRequest) (string, error)
RemoveNetwork(name string) error
DeleteNetwork(name string) error
NetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error)
}


@@ -1 +1,82 @@
package network // import "github.com/docker/docker/api/server/router/network"
package network
import (
"fmt"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/runconfig"
)
func filterNetworkByType(nws []types.NetworkResource, netType string) ([]types.NetworkResource, error) {
retNws := []types.NetworkResource{}
switch netType {
case "builtin":
for _, nw := range nws {
if runconfig.IsPreDefinedNetwork(nw.Name) {
retNws = append(retNws, nw)
}
}
case "custom":
for _, nw := range nws {
if !runconfig.IsPreDefinedNetwork(nw.Name) {
retNws = append(retNws, nw)
}
}
default:
return nil, fmt.Errorf("Invalid filter: 'type'='%s'", netType)
}
return retNws, nil
}
// filterNetworks filters network list according to user specified filter
// and returns user chosen networks
func filterNetworks(nws []types.NetworkResource, filter filters.Args) ([]types.NetworkResource, error) {
// if filter is empty, return original network list
if filter.Len() == 0 {
return nws, nil
}
displayNet := []types.NetworkResource{}
for _, nw := range nws {
if filter.Include("driver") {
if !filter.ExactMatch("driver", nw.Driver) {
continue
}
}
if filter.Include("name") {
if !filter.Match("name", nw.Name) {
continue
}
}
if filter.Include("id") {
if !filter.Match("id", nw.ID) {
continue
}
}
if filter.Include("label") {
if !filter.MatchKVList("label", nw.Labels) {
continue
}
}
displayNet = append(displayNet, nw)
}
if filter.Include("type") {
typeNet := []types.NetworkResource{}
errFilter := filter.WalkValues("type", func(fval string) error {
passList, err := filterNetworkByType(displayNet, fval)
if err != nil {
return err
}
typeNet = append(typeNet, passList...)
return nil
})
if errFilter != nil {
return nil, errFilter
}
displayNet = typeNet
}
return displayNet, nil
}
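For context on how these helpers compose: driver, name, id, and label are applied per network, while type fans out through filterNetworkByType. A small usage sketch (the function and filter values are illustrative):

package network

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
)

// exampleFilter keeps only user-defined overlay networks.
func exampleFilter(nws []types.NetworkResource) ([]types.NetworkResource, error) {
	f := filters.NewArgs()
	f.Add("driver", "overlay") // exact match against nw.Driver
	f.Add("type", "custom")    // routed through filterNetworkByType

	out, err := filterNetworks(nws, f)
	if err != nil {
		return nil, fmt.Errorf("bad filter: %v", err)
	}
	return out, nil
}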

View File

@@ -0,0 +1,115 @@
// +build !windows
package network
import (
"strings"
"testing"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
)
func TestFilterNetworks(t *testing.T) {
networks := []types.NetworkResource{
{
Name: "host",
Driver: "host",
},
{
Name: "bridge",
Driver: "bridge",
},
{
Name: "none",
Driver: "null",
},
{
Name: "myoverlay",
Driver: "overlay",
},
{
Name: "mydrivernet",
Driver: "mydriver",
},
}
bridgeDriverFilters := filters.NewArgs()
bridgeDriverFilters.Add("driver", "bridge")
overlayDriverFilters := filters.NewArgs()
overlayDriverFilters.Add("driver", "overlay")
nonameDriverFilters := filters.NewArgs()
nonameDriverFilters.Add("driver", "noname")
customDriverFilters := filters.NewArgs()
customDriverFilters.Add("type", "custom")
builtinDriverFilters := filters.NewArgs()
builtinDriverFilters.Add("type", "builtin")
invalidDriverFilters := filters.NewArgs()
invalidDriverFilters.Add("type", "invalid")
testCases := []struct {
filter filters.Args
resultCount int
err string
}{
{
filter: bridgeDriverFilters,
resultCount: 1,
err: "",
},
{
filter: overlayDriverFilters,
resultCount: 1,
err: "",
},
{
filter: nonameDriverFilters,
resultCount: 0,
err: "",
},
{
filter: customDriverFilters,
resultCount: 2,
err: "",
},
{
filter: builtinDriverFilters,
resultCount: 3,
err: "",
},
{
filter: invalidDriverFilters,
resultCount: 0,
err: "Invalid filter: 'type'='invalid'",
},
}
for _, testCase := range testCases {
result, err := filterNetworks(networks, testCase.filter)
if testCase.err != "" {
if err == nil {
t.Fatalf("expect error '%s', got no error", testCase.err)
} else if !strings.Contains(err.Error(), testCase.err) {
t.Fatalf("expect error '%s', got '%s'", testCase.err, err)
}
} else {
if err != nil {
t.Fatalf("expect no error, got error '%s'", err)
}
// Make sure result is not nil
if result == nil {
t.Fatal("filterNetworks should not return nil")
}
if len(result) != testCase.resultCount {
t.Fatalf("expect '%d' networks, got '%d' networks", testCase.resultCount, len(result))
}
}
}
}

View File

@@ -1,18 +1,19 @@
package network // import "github.com/docker/docker/api/server/router/network"
package network
import (
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/daemon/cluster"
)
// networkRouter is a router to talk with the network controller
type networkRouter struct {
backend Backend
cluster ClusterBackend
cluster *cluster.Cluster
routes []router.Route
}
// NewRouter initializes a new network router
func NewRouter(b Backend, c ClusterBackend) router.Router {
func NewRouter(b Backend, c *cluster.Cluster) router.Router {
r := &networkRouter{
backend: b,
cluster: c,

View File

@@ -1,20 +1,33 @@
package network // import "github.com/docker/docker/api/server/router/network"
package network
import (
"context"
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"golang.org/x/net/context"
"github.com/docker/docker/api/errors"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libnetwork"
netconst "github.com/docker/docker/libnetwork/datastore"
"github.com/pkg/errors"
"github.com/docker/libnetwork"
"github.com/docker/libnetwork/networkdb"
)
var (
// acceptedNetworkFilters is a list of acceptable filters
acceptedNetworkFilters = map[string]bool{
"driver": true,
"type": true,
"name": true,
"id": true,
"label": true,
}
)
func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -22,71 +35,52 @@ func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWrit
return err
}
filter, err := filters.FromJSON(r.Form.Get("filters"))
filter := r.Form.Get("filters")
netFilters, err := filters.FromParam(filter)
if err != nil {
return err
}
if err := network.ValidateFilters(filter); err != nil {
if err := netFilters.Validate(acceptedNetworkFilters); err != nil {
return err
}
var list []types.NetworkResource
nr, err := n.cluster.GetNetworks(filter)
if err == nil {
list = nr
list := []types.NetworkResource{}
if nr, err := n.cluster.GetNetworks(); err == nil {
list = append(list, nr...)
}
// Add the networks returned by the Docker daemon that were not already
// returned by the cluster manager.
localNetworks, err := n.backend.GetNetworks(filter, types.NetworkListConfig{Detailed: versions.LessThan(httputils.VersionFromContext(ctx), "1.28")})
SKIP:
for _, nw := range n.backend.GetNetworks() {
for _, nl := range list {
if nl.ID == nw.ID() {
continue SKIP
}
}
var nr *types.NetworkResource
// API versions < 1.28 fetch all the containers attached to a network
// in the network list call, which is a heavyweight operation when run
// across all networks. Starting with API version 1.28, this detailed
// info is only available through the network-specific GET API
// (equivalent to inspect).
if versions.LessThan(httputils.VersionFromContext(ctx), "1.28") {
nr = n.buildDetailedNetworkResources(nw, false)
} else {
nr = n.buildNetworkResource(nw)
}
list = append(list, *nr)
}
list, err = filterNetworks(list, netFilters)
if err != nil {
return err
}
var idx map[string]bool
if len(list) > 0 {
idx = make(map[string]bool, len(list))
for _, n := range list {
idx[n.ID] = true
}
}
for _, n := range localNetworks {
if idx[n.ID] {
continue
}
list = append(list, n)
}
if list == nil {
list = []types.NetworkResource{}
}
return httputils.WriteJSON(w, http.StatusOK, list)
}
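The merge order above (cluster results first, then local ones de-duplicated by ID) is invisible to API clients, which just see one list. A client-side sketch that exercises this handler, assuming the Go client from github.com/docker/docker/client (NewEnvClient being the constructor of this era):

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	f := filters.NewArgs()
	f.Add("driver", "bridge")
	// Hits GET /networks, i.e. getNetworksList above.
	nws, err := cli.NetworkList(context.Background(), types.NetworkListOptions{Filters: f})
	if err != nil {
		panic(err)
	}
	for _, nw := range nws {
		fmt.Println(nw.Name, nw.Driver, nw.Scope)
	}
}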
type invalidRequestError struct {
cause error
}
func (e invalidRequestError) Error() string {
return e.cause.Error()
}
func (e invalidRequestError) InvalidParameter() {}
type ambigousResultsError string
func (e ambigousResultsError) Error() string {
return "network " + string(e) + " is ambiguous"
}
func (ambigousResultsError) InvalidParameter() {}
func nameConflict(name string) error {
return errdefs.Conflict(libnetwork.NetworkNameError(name))
}
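On the errdefs side of this diff, these small marker types are how the handler communicates HTTP semantics without touching status codes directly: anything exposing InvalidParameter maps to 400, a Conflict to 409, and so on. A simplified stand-in for that mapping (a sketch, not the daemon's actual error-handling middleware):

package main

import (
	"net/http"

	"github.com/docker/docker/errdefs"
)

// statusFor sketches how the marker interfaces translate to status
// codes; the real translation lives in the API server's error handling.
func statusFor(err error) int {
	switch {
	case errdefs.IsInvalidParameter(err):
		return http.StatusBadRequest // 400
	case errdefs.IsConflict(err):
		return http.StatusConflict // 409
	case errdefs.IsNotFound(err):
		return http.StatusNotFound // 404
	default:
		return http.StatusInternalServerError // 500
	}
}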
func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
@@ -99,10 +93,10 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
)
if v := r.URL.Query().Get("verbose"); v != "" {
if verbose, err = strconv.ParseBool(v); err != nil {
return errors.Wrapf(invalidRequestError{err}, "invalid value for verbose: %s", v)
err = fmt.Errorf("invalid value for verbose: %s", v)
return errors.NewBadRequestError(err)
}
}
scope := r.URL.Query().Get("scope")
// If multiple networks have duplicate names, return an error.
// TODO (yongtang): should we wrap with version here for backward compatibility?
@@ -115,48 +109,24 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
listByFullName := map[string]types.NetworkResource{}
listByPartialID := map[string]types.NetworkResource{}
// TODO(@cpuguy83): All this logic for figuring out which network to return does not belong here
// Instead there should be a backend function to just get one network.
filter := filters.NewArgs(filters.Arg("idOrName", term))
if scope != "" {
filter.Add("scope", scope)
}
nw, _ := n.backend.GetNetworks(filter, types.NetworkListConfig{Detailed: true, Verbose: verbose})
nw := n.backend.GetNetworks()
for _, network := range nw {
if network.ID == term {
return httputils.WriteJSON(w, http.StatusOK, network)
if network.ID() == term {
return httputils.WriteJSON(w, http.StatusOK, *n.buildDetailedNetworkResources(network, verbose))
}
if network.Name == term {
if network.Name() == term {
// No need to check the ID collision here as we are still in
// local scope and the network ID is unique in this scope.
listByFullName[network.ID] = network
listByFullName[network.ID()] = *n.buildDetailedNetworkResources(network, verbose)
}
if strings.HasPrefix(network.ID, term) {
if strings.HasPrefix(network.ID(), term) {
// No need to check the ID collision here as we are still in
// local scope and the network ID is unique in this scope.
listByPartialID[network.ID] = network
listByPartialID[network.ID()] = *n.buildDetailedNetworkResources(network, verbose)
}
}
nwk, err := n.cluster.GetNetwork(term)
if err == nil {
// If the request carried a specific network ID (or partial ID), or a
// network name with the scope set to swarm, return the network. We skip
// isMatchingScope because it returns true when the scope is not set,
// which is the case for clients before API v1.30.
if strings.HasPrefix(nwk.ID, term) || (netconst.SwarmScope == scope) {
// If we have a previous match "backend", return it, we need verbose when enabled
// ex: overlay/partial_ID or name/swarm_scope
if nwv, ok := listByPartialID[nwk.ID]; ok {
nwk = nwv
} else if nwv, ok := listByFullName[nwk.ID]; ok {
nwk = nwv
}
return httputils.WriteJSON(w, http.StatusOK, nwk)
}
}
nr, _ := n.cluster.GetNetworks(filter)
nr, _ := n.cluster.GetNetworks()
for _, network := range nr {
if network.ID == term {
return httputils.WriteJSON(w, http.StatusOK, network)
@@ -186,7 +156,7 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
}
}
if len(listByFullName) > 1 {
return errors.Wrapf(ambigousResultsError(term), "%d matches found based on name", len(listByFullName))
return fmt.Errorf("network %s is ambiguous (%d matches found based on name)", term, len(listByFullName))
}
// Find based on partial ID, returns true only if no duplicates
@@ -196,24 +166,29 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
}
}
if len(listByPartialID) > 1 {
return errors.Wrapf(ambigousResultsError(term), "%d matches found based on ID prefix", len(listByPartialID))
return fmt.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID))
}
return libnetwork.ErrNoSuchNetwork(term)
}
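From a client, the verbose and scope query parameters handled above look like this (a sketch: newer Go clients take an options struct while older ones take a bare bool, and "my-overlay" is a placeholder name):

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// Hits GET /networks/my-overlay?verbose=true&scope=swarm,
	// i.e. getNetwork above.
	nw, err := cli.NetworkInspect(context.Background(), "my-overlay",
		types.NetworkInspectOptions{Verbose: true, Scope: "swarm"})
	if err != nil {
		panic(err)
	}
	fmt.Println(nw.Name, nw.Scope, "services:", len(nw.Services))
}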
func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var create types.NetworkCreateRequest
if err := httputils.ParseForm(r); err != nil {
return err
}
var create types.NetworkCreateRequest
if err := httputils.ReadJSON(r, &create); err != nil {
if err := httputils.CheckForJSON(r); err != nil {
return err
}
if err := json.NewDecoder(r.Body).Decode(&create); err != nil {
return err
}
if nws, err := n.cluster.GetNetworksByName(create.Name); err == nil && len(nws) > 0 {
return nameConflict(create.Name)
return libnetwork.NetworkNameError(create.Name)
}
nw, err := n.backend.CreateNetwork(create)
@@ -223,7 +198,7 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
// Check whether the user set CheckDuplicate; if true, return the error,
// otherwise prepare a warning message.
if create.CheckDuplicate {
return nameConflict(create.Name)
return libnetwork.NetworkNameError(create.Name)
}
warning = libnetwork.NetworkNameError(create.Name).Error()
}
@@ -245,29 +220,33 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
}
func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var connect types.NetworkConnect
if err := httputils.ParseForm(r); err != nil {
return err
}
var connect types.NetworkConnect
if err := httputils.ReadJSON(r, &connect); err != nil {
if err := httputils.CheckForJSON(r); err != nil {
return err
}
if err := json.NewDecoder(r.Body).Decode(&connect); err != nil {
return err
}
// Unlike other operations, we do not check the ambiguity of the name/ID here.
// The reason is that, in the case of an attachable network in swarm scope, the
// actual local network may not be available yet. At the same time,
// `ConnectContainerToNetwork` inside the daemon performs the ambiguity check
// anyway, so passing the name to the daemon is enough.
return n.backend.ConnectContainerToNetwork(connect.Container, vars["id"], connect.EndpointConfig)
}
func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var disconnect types.NetworkDisconnect
if err := httputils.ParseForm(r); err != nil {
return err
}
var disconnect types.NetworkDisconnect
if err := httputils.ReadJSON(r, &disconnect); err != nil {
if err := httputils.CheckForJSON(r); err != nil {
return err
}
if err := json.NewDecoder(r.Body).Decode(&disconnect); err != nil {
return err
}
@@ -278,113 +257,205 @@ func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter
if err := httputils.ParseForm(r); err != nil {
return err
}
nw, err := n.findUniqueNetwork(vars["id"])
if err != nil {
return err
if _, err := n.cluster.GetNetwork(vars["id"]); err == nil {
if err = n.cluster.RemoveNetwork(vars["id"]); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
if nw.Scope == "swarm" {
if err = n.cluster.RemoveNetwork(nw.ID); err != nil {
return err
}
} else {
if err := n.backend.DeleteNetwork(nw.ID); err != nil {
return err
}
if err := n.backend.DeleteNetwork(vars["id"]); err != nil {
return err
}
w.WriteHeader(http.StatusNoContent)
return nil
}
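The corresponding client call is a one-liner; a 204 from this handler surfaces as a nil error (a sketch, with "my-overlay" as a placeholder):

package main

import (
	"context"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// Hits DELETE /networks/my-overlay, i.e. deleteNetwork above. The
	// handler decides whether the cluster or the local daemon owns it.
	if err := cli.NetworkRemove(context.Background(), "my-overlay"); err != nil {
		panic(err)
	}
}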
func (n *networkRouter) buildNetworkResource(nw libnetwork.Network) *types.NetworkResource {
r := &types.NetworkResource{}
if nw == nil {
return r
}
info := nw.Info()
r.Name = nw.Name()
r.ID = nw.ID()
r.Created = info.Created()
r.Scope = info.Scope()
if n.cluster.IsManager() {
if _, err := n.cluster.GetNetwork(nw.ID()); err == nil {
r.Scope = "swarm"
}
} else if info.Dynamic() {
r.Scope = "swarm"
}
r.Driver = nw.Type()
r.EnableIPv6 = info.IPv6Enabled()
r.Internal = info.Internal()
r.Attachable = info.Attachable()
r.Options = info.DriverOptions()
r.Containers = make(map[string]types.EndpointResource)
buildIpamResources(r, info)
r.Labels = info.Labels()
peers := info.Peers()
if len(peers) != 0 {
r.Peers = buildPeerInfoResources(peers)
}
return r
}
func (n *networkRouter) buildDetailedNetworkResources(nw libnetwork.Network, verbose bool) *types.NetworkResource {
if nw == nil {
return &types.NetworkResource{}
}
r := n.buildNetworkResource(nw)
epl := nw.Endpoints()
for _, e := range epl {
ei := e.Info()
if ei == nil {
continue
}
sb := ei.Sandbox()
tmpID := e.ID()
key := "ep-" + tmpID
if sb != nil {
key = sb.ContainerID()
}
r.Containers[key] = buildEndpointResource(tmpID, e.Name(), ei)
}
if !verbose {
return r
}
services := nw.Info().Services()
r.Services = make(map[string]network.ServiceInfo)
for name, service := range services {
tasks := []network.Task{}
for _, t := range service.Tasks {
tasks = append(tasks, network.Task{
Name: t.Name,
EndpointID: t.EndpointID,
EndpointIP: t.EndpointIP,
Info: t.Info,
})
}
r.Services[name] = network.ServiceInfo{
VIP: service.VIP,
Ports: service.Ports,
Tasks: tasks,
LocalLBIndex: service.LocalLBIndex,
}
}
return r
}
func buildPeerInfoResources(peers []networkdb.PeerInfo) []network.PeerInfo {
peerInfo := make([]network.PeerInfo, 0, len(peers))
for _, peer := range peers {
peerInfo = append(peerInfo, network.PeerInfo{
Name: peer.Name,
IP: peer.IP,
})
}
return peerInfo
}
func buildIpamResources(r *types.NetworkResource, nwInfo libnetwork.NetworkInfo) {
id, opts, ipv4conf, ipv6conf := nwInfo.IpamConfig()
ipv4Info, ipv6Info := nwInfo.IpamInfo()
r.IPAM.Driver = id
r.IPAM.Options = opts
r.IPAM.Config = []network.IPAMConfig{}
for _, ip4 := range ipv4conf {
if ip4.PreferredPool == "" {
continue
}
iData := network.IPAMConfig{}
iData.Subnet = ip4.PreferredPool
iData.IPRange = ip4.SubPool
iData.Gateway = ip4.Gateway
iData.AuxAddress = ip4.AuxAddresses
r.IPAM.Config = append(r.IPAM.Config, iData)
}
if len(r.IPAM.Config) == 0 {
for _, ip4Info := range ipv4Info {
iData := network.IPAMConfig{}
iData.Subnet = ip4Info.IPAMData.Pool.String()
iData.Gateway = ip4Info.IPAMData.Gateway.IP.String()
r.IPAM.Config = append(r.IPAM.Config, iData)
}
}
hasIpv6Conf := false
for _, ip6 := range ipv6conf {
if ip6.PreferredPool == "" {
continue
}
hasIpv6Conf = true
iData := network.IPAMConfig{}
iData.Subnet = ip6.PreferredPool
iData.IPRange = ip6.SubPool
iData.Gateway = ip6.Gateway
iData.AuxAddress = ip6.AuxAddresses
r.IPAM.Config = append(r.IPAM.Config, iData)
}
if !hasIpv6Conf {
for _, ip6Info := range ipv6Info {
iData := network.IPAMConfig{}
iData.Subnet = ip6Info.IPAMData.Pool.String()
iData.Gateway = ip6Info.IPAMData.Gateway.String()
r.IPAM.Config = append(r.IPAM.Config, iData)
}
}
}
func buildEndpointResource(id string, name string, info libnetwork.EndpointInfo) types.EndpointResource {
er := types.EndpointResource{}
er.EndpointID = id
er.Name = name
ei := info
if ei == nil {
return er
}
if iface := ei.Iface(); iface != nil {
if mac := iface.MacAddress(); mac != nil {
er.MacAddress = mac.String()
}
if ip := iface.Address(); ip != nil && len(ip.IP) > 0 {
er.IPv4Address = ip.String()
}
if ipv6 := iface.AddressIPv6(); ipv6 != nil && len(ipv6.IP) > 0 {
er.IPv6Address = ipv6.String()
}
}
return er
}
func (n *networkRouter) postNetworksPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := httputils.ParseForm(r); err != nil {
return err
}
pruneFilters, err := filters.FromJSON(r.Form.Get("filters"))
pruneFilters, err := filters.FromParam(r.Form.Get("filters"))
if err != nil {
return err
}
pruneReport, err := n.backend.NetworksPrune(ctx, pruneFilters)
pruneReport, err := n.backend.NetworksPrune(pruneFilters)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, pruneReport)
}
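Prune is also exposed through the Go client; a sketch (the filter value is illustrative):

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	f := filters.NewArgs()
	f.Add("until", "24h") // only prune networks unused for a day
	// Hits POST /networks/prune, i.e. postNetworksPrune above.
	report, err := cli.NetworksPrune(context.Background(), f)
	if err != nil {
		panic(err)
	}
	fmt.Println("removed:", report.NetworksDeleted)
}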
// findUniqueNetwork searches for a network across scopes (both local and swarm).
// NOTE: this findUniqueNetwork is different from FindNetwork in the daemon.
// If multiple networks have duplicate names, it returns an error.
// It first matches on the full ID and returns immediately once one is found.
// If a network appears in both swarm and local scope, the local one wins.
// For full-name and partial-ID matches, the results are collected first and
// processed later, in case multiple records were found for the same term.
// TODO (yongtang): should we wrap with version here for backward compatibility?
func (n *networkRouter) findUniqueNetwork(term string) (types.NetworkResource, error) {
listByFullName := map[string]types.NetworkResource{}
listByPartialID := map[string]types.NetworkResource{}
filter := filters.NewArgs(filters.Arg("idOrName", term))
nw, _ := n.backend.GetNetworks(filter, types.NetworkListConfig{Detailed: true})
for _, network := range nw {
if network.ID == term {
return network, nil
}
if network.Name == term && !network.Ingress {
// No need to check the ID collision here as we are still in
// local scope and the network ID is unique in this scope.
listByFullName[network.ID] = network
}
if strings.HasPrefix(network.ID, term) {
// No need to check the ID collision here as we are still in
// local scope and the network ID is unique in this scope.
listByPartialID[network.ID] = network
}
}
nr, _ := n.cluster.GetNetworks(filter)
for _, network := range nr {
if network.ID == term {
return network, nil
}
if network.Name == term {
// Check the ID collision as we are in swarm scope here, and
// the map (of the listByFullName) may have already had a
// network with the same ID (from local scope previously)
if _, ok := listByFullName[network.ID]; !ok {
listByFullName[network.ID] = network
}
}
if strings.HasPrefix(network.ID, term) {
// Check the ID collision as we are in swarm scope here, and
// the map (of the listByPartialID) may have already had a
// network with the same ID (from local scope previously)
if _, ok := listByPartialID[network.ID]; !ok {
listByPartialID[network.ID] = network
}
}
}
// Find based on full name, returns true only if no duplicates
if len(listByFullName) == 1 {
for _, v := range listByFullName {
return v, nil
}
}
if len(listByFullName) > 1 {
return types.NetworkResource{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on name)", term, len(listByFullName)))
}
// Find based on partial ID, returns true only if no duplicates
if len(listByPartialID) == 1 {
for _, v := range listByPartialID {
return v, nil
}
}
if len(listByPartialID) > 1 {
return types.NetworkResource{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID)))
}
return types.NetworkResource{}, errdefs.NotFound(libnetwork.ErrNoSuchNetwork(term))
}

View File

@@ -1,14 +1,13 @@
package plugin // import "github.com/docker/docker/api/server/router/plugin"
package plugin
import (
"context"
"io"
"net/http"
"github.com/docker/distribution/reference"
enginetypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/plugin"
"golang.org/x/net/context"
)
// Backend for Plugin
@@ -20,7 +19,7 @@ type Backend interface {
Remove(name string, config *enginetypes.PluginRmConfig) error
Set(name string, args []string) error
Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error)
Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error
Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error
Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error
Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error
CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error

View File

@@ -1,4 +1,4 @@
package plugin // import "github.com/docker/docker/api/server/router/plugin"
package plugin
import "github.com/docker/docker/api/server/router"
@@ -28,11 +28,11 @@ func (r *pluginRouter) initRoutes() {
router.NewGetRoute("/plugins/{name:.*}/json", r.inspectPlugin),
router.NewGetRoute("/plugins/privileges", r.getPrivileges),
router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin),
router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin),
router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin), // PATCH?
router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin),
router.NewPostRoute("/plugins/pull", r.pullPlugin),
router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin),
router.NewPostRoute("/plugins/{name:.*}/upgrade", r.upgradePlugin),
router.Cancellable(router.NewPostRoute("/plugins/pull", r.pullPlugin)),
router.Cancellable(router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin)),
router.Cancellable(router.NewPostRoute("/plugins/{name:.*}/upgrade", r.upgradePlugin)),
router.NewPostRoute("/plugins/{name:.*}/set", r.setPlugin),
router.NewPostRoute("/plugins/create", r.createPlugin),
}
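Wrapping pull, push, and upgrade in Cancellable matters because these are the long, streaming operations: when the client hangs up, the daemon can abort the transfer instead of finishing it in the background. A client-side sketch of that cancellation, assuming PluginInstall returns a progress stream (the plugin name and timeout are illustrative):

package main

import (
	"context"
	"io"
	"io/ioutil"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Hits POST /plugins/pull. If ctx is cancelled, the connection
	// closes and the Cancellable wrapper cancels the server handler.
	rc, err := cli.PluginInstall(ctx, "example/plugin:latest", types.PluginInstallOptions{
		RemoteRef:            "example/plugin:latest",
		AcceptAllPermissions: true,
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	_, _ = io.Copy(ioutil.Discard, rc) // drain the progress stream
}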

View File

@@ -1,7 +1,6 @@
package plugin // import "github.com/docker/docker/api/server/router/plugin"
package plugin
import (
"context"
"encoding/base64"
"encoding/json"
"net/http"
@@ -15,6 +14,7 @@ import (
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/streamformatter"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
func parseHeaders(headers http.Header) (map[string][]string, *types.AuthConfig) {
@@ -94,8 +94,12 @@ func (pr *pluginRouter) upgradePlugin(ctx context.Context, w http.ResponseWriter
}
var privileges types.PluginPrivileges
if err := httputils.ReadJSON(r, &privileges); err != nil {
return err
dec := json.NewDecoder(r.Body)
if err := dec.Decode(&privileges); err != nil {
return errors.Wrap(err, "failed to parse privileges")
}
if dec.More() {
return errors.New("invalid privileges")
}
metaHeaders, authConfig := parseHeaders(r.Header)
@@ -117,7 +121,7 @@ func (pr *pluginRouter) upgradePlugin(ctx context.Context, w http.ResponseWriter
if !output.Flushed() {
return err
}
_, _ = output.Write(streamformatter.FormatError(err))
output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err))
}
return nil
@@ -129,8 +133,12 @@ func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r
}
var privileges types.PluginPrivileges
if err := httputils.ReadJSON(r, &privileges); err != nil {
return err
dec := json.NewDecoder(r.Body)
if err := dec.Decode(&privileges); err != nil {
return errors.Wrap(err, "failed to parse privileges")
}
if dec.More() {
return errors.New("invalid privileges")
}
metaHeaders, authConfig := parseHeaders(r.Header)
@@ -152,7 +160,7 @@ func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r
if !output.Flushed() {
return err
}
_, _ = output.Write(streamformatter.FormatError(err))
output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err))
}
return nil
@@ -201,7 +209,7 @@ func (pr *pluginRouter) createPlugin(ctx context.Context, w http.ResponseWriter,
if err := pr.backend.CreateFromContext(ctx, r.Body, options); err != nil {
return err
}
// TODO: send progress bar
//TODO: send progress bar
w.WriteHeader(http.StatusNoContent)
return nil
}
@@ -260,14 +268,14 @@ func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r
if !output.Flushed() {
return err
}
_, _ = output.Write(streamformatter.FormatError(err))
output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err))
}
return nil
}
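The decode-then-More dance repeated in upgradePlugin and pullPlugin above is a strictness check: accept exactly one JSON document and reject trailing bytes (the other side of this diff folds the same behavior into httputils.ReadJSON). As a standalone helper it would look roughly like this (hypothetical, not part of the diff):

package main

import (
	"encoding/json"
	"errors"
	"io"
)

// readStrictJSON decodes exactly one JSON value from body and rejects
// any trailing content, mirroring the dec.Decode + dec.More pattern.
func readStrictJSON(body io.Reader, v interface{}) error {
	dec := json.NewDecoder(body)
	if err := dec.Decode(v); err != nil {
		return err
	}
	if dec.More() {
		return errors.New("unexpected content after JSON body")
	}
	return nil
}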
func (pr *pluginRouter) setPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var args []string
if err := httputils.ReadJSON(r, &args); err != nil {
if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
return err
}
if err := pr.backend.Set(vars["name"], args); err != nil {
@@ -282,7 +290,7 @@ func (pr *pluginRouter) listPlugins(ctx context.Context, w http.ResponseWriter,
return err
}
pluginFilters, err := filters.FromJSON(r.Form.Get("filters"))
pluginFilters, err := filters.FromParam(r.Form.Get("filters"))
if err != nil {
return err
}

Some files were not shown because too many files have changed in this diff.