Fix bunch of typos
Signed-off-by: Qiang Huang <h.huangqiang@huawei.com>
@@ -1287,7 +1287,7 @@ by another client (#15489)
 #### Security
 - Fix tar breakout vulnerability
 * Extractions are now sandboxed chroot
-- Security options are no longer comitted to images
+- Security options are no longer committed to images

 #### Runtime
 - Fix deadlock in `docker ps -f exited=1`
@@ -123,7 +123,7 @@ However, there might be a way to implement that feature *on top of* Docker.
 group is for contributors and other people contributing to the Docker project.
 You can join them without a google account by sending an email to
 <a href="mailto:docker-dev+subscribe@googlegroups.com">docker-dev+subscribe@googlegroups.com</a>.
-After receiving the join-request message, you can simply reply to that to confirm the subscribtion.
+After receiving the join-request message, you can simply reply to that to confirm the subscription.
 </td>
 </tr>
 <tr>
@@ -4031,7 +4031,7 @@ paths:
 post:
 summary: "Build an image"
 description: |
-Build an image from a tar achive with a Dockerfile in it.
+Build an image from a tar archive with a Dockerfile in it.

 The Dockerfile specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the Dockerfile reference for more information](https://docs.docker.com/engine/reference/builder/).

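Since this hunk edits the description of the image-build endpoint, a minimal sketch of calling it may help. The snippet posts a tar build context over the local Engine socket and passes the `dockerfile` parameter described above; the archive name `context.tar`, the path `build/Dockerfile.dev`, and the tag `demo:latest` are hypothetical, not values from the commit.

```go
package main

import (
	"context"
	"io"
	"net"
	"net/http"
	"os"
)

func main() {
	// Talk to the local Engine API over its Unix socket; the host part of
	// the URL is a placeholder because the transport dials the socket.
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return net.Dial("unix", "/var/run/docker.sock")
			},
		},
	}

	// The build context is a tar archive; the `dockerfile` query parameter
	// points at the Dockerfile inside it, as the description above says.
	buildContext, err := os.Open("context.tar") // hypothetical archive
	if err != nil {
		panic(err)
	}
	defer buildContext.Close()

	req, err := http.NewRequest(http.MethodPost,
		"http://docker/build?t=demo:latest&dockerfile=build/Dockerfile.dev",
		buildContext)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/x-tar")

	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body) // stream the JSON build progress messages
}
```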
@@ -54,7 +54,7 @@ type NodeDescription struct {
 Engine EngineDescription `json:",omitempty"`
 }

-// Platform represents the platfrom (Arch/OS).
+// Platform represents the platform (Arch/OS).
 type Platform struct {
 Architecture string `json:",omitempty"`
 OS string `json:",omitempty"`
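As a small aside on the struct in this hunk: the `json:",omitempty"` tags mean empty fields are dropped when the node description is serialized. A minimal sketch, using a locally redefined stand-in for `Platform` rather than the real package type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-in for the Platform type shown in the hunk above.
type Platform struct {
	Architecture string `json:",omitempty"`
	OS           string `json:",omitempty"`
}

func main() {
	b, _ := json.Marshal(Platform{Architecture: "x86_64"})
	fmt.Println(string(b)) // {"Architecture":"x86_64"} — the empty OS field is omitted
}
```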
@@ -410,7 +410,7 @@ func (b *Builder) processImageFrom(img builder.Image) error {
 fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word)
 }

-// Copy the ONBUILD triggers, and remove them from the config, since the config will be comitted.
+// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
 onBuildTriggers := b.runConfig.OnBuild
 b.runConfig.OnBuild = []string{}

@@ -47,7 +47,7 @@ func runLogout(dockerCli *command.DockerCli, serverAddress string) error {
 )
 if !isDefaultRegistry {
 hostnameAddress = registry.ConvertToHostname(serverAddress)
-// the tries below are kept for backward compatibily where a user could have
+// the tries below are kept for backward compatibility where a user could have
 // saved the registry in one of the following format.
 regsToTry = append(regsToTry, hostnameAddress, "http://"+hostnameAddress, "https://"+hostnameAddress)
 }
@@ -46,7 +46,7 @@ func newJoinTokenCommand(dockerCli *command.DockerCli) *cobra.Command {
 return err
 }
 if !quiet {
-fmt.Fprintf(dockerCli.Out(), "Succesfully rotated %s join token.\n\n", args[0])
+fmt.Fprintf(dockerCli.Out(), "Successfully rotated %s join token.\n\n", args[0])
 }
 }

@@ -64,7 +64,7 @@ func (c *memoryStore) First(filter StoreFilter) *Container {
 }

 // ApplyAll calls the reducer function with every container in the store.
-// This operation is asyncronous in the memory store.
+// This operation is asynchronous in the memory store.
 // NOTE: Modifications to the store MUST NOT be done by the StoreReducer.
 func (c *memoryStore) ApplyAll(apply StoreReducer) {
 wg := new(sync.WaitGroup)
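The comment corrected in this hunk notes that `ApplyAll` runs the reducer asynchronously, and the `sync.WaitGroup` on the last context line is the usual way to fan work out per container while still blocking until every goroutine finishes. A minimal, self-contained sketch of that pattern (with stand-in types, not the actual store implementation):

```go
package main

import (
	"fmt"
	"sync"
)

// Stand-ins for the store's types, just to make the sketch compile.
type Container struct{ ID string }
type StoreReducer func(*Container)

// applyAll mirrors the pattern hinted at above: one goroutine per container,
// with the WaitGroup ensuring the call returns only after every reducer
// invocation has completed.
func applyAll(containers []*Container, apply StoreReducer) {
	wg := new(sync.WaitGroup)
	for _, c := range containers {
		wg.Add(1)
		go func(c *Container) {
			defer wg.Done()
			apply(c)
		}(c)
	}
	wg.Wait()
}

func main() {
	cs := []*Container{{ID: "a"}, {ID: "b"}}
	applyAll(cs, func(c *Container) { fmt.Println("visited", c.ID) })
}
```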
@@ -10,5 +10,5 @@ Compile
 ## inside build container
 $ go build contrib/docker-device-tool/device_tool.go

-# if devicemapper version is old and compliation fails, compile with `libdm_no_deferred_remove` tag
+# if devicemapper version is old and compilation fails, compile with `libdm_no_deferred_remove` tag
 $ go build -tags libdm_no_deferred_remove contrib/docker-device-tool/device_tool.go
@@ -19,7 +19,7 @@ func validateMounts(mounts []api.Mount) error {
 // The checks on abs paths are required due to the container API confusing
 // volume mounts as bind mounts when the source is absolute (and vice-versa)
 // See #25253
-// TODO: This is probably not neccessary once #22373 is merged
+// TODO: This is probably not necessary once #22373 is merged
 case api.MountTypeBind:
 if !filepath.IsAbs(mount.Source) {
 return fmt.Errorf("invalid bind mount source, must be an absolute path: %s", mount.Source)
@@ -63,7 +63,7 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
 err := <-wait
 if err == nil {
 if err = daemon.containerStart(c, "", "", false); err != nil {
-logrus.Debugf("failed to restart contianer: %+v", err)
+logrus.Debugf("failed to restart container: %+v", err)
 }
 }
 if err != nil {
@@ -190,7 +190,7 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo
 mp.Name = v.Name()
 mp.Driver = v.DriverName()

-// only use the cached path here since getting the path is not neccessary right now and calling `Path()` may be slow
+// only use the cached path here since getting the path is not necessary right now and calling `Path()` may be slow
 if cv, ok := v.(interface {
 CachedPath() string
 }); ok {
@@ -1273,7 +1273,7 @@ Server containers or Hyper-V containers. For Hyper-V containers, the engine
 is, for architectural reasons, unable to create the directory if it does not
 previously exist. For Windows Server containers, the directory is created
 if it does not exist. Hence, for consistency between Windows Server and
-Hyper-V containers, it is strongly recommended to include an explict instruction
+Hyper-V containers, it is strongly recommended to include an explicit instruction
 to create the directory in the Dockerfile. For example:

 # escape=`
@@ -58,7 +58,7 @@ Use the `--rotate` flag to generate a new join token for the specified role:

 ```bash
 $ docker swarm join-token --rotate worker
-Succesfully rotated worker join token.
+Successfully rotated worker join token.

 To add a worker to this swarm, run the following command:

@@ -85,7 +85,7 @@ if command -v git &> /dev/null && [ -d .git ] && git rev-parse &> /dev/null; the
 echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
 echo "# GITCOMMIT = $GITCOMMIT"
 echo "# The version you are building is listed as unsupported because"
-echo "# there are some files in the git repository that are in an uncommited state."
+echo "# there are some files in the git repository that are in an uncommitted state."
 echo "# Commit these changes, or add to .gitignore to remove the -unsupported from the version."
 echo "# Here is the current list:"
 git status --porcelain --untracked-files=no
@@ -206,7 +206,7 @@ release_build() {
 s3Os=Linux
 ;;
 windows)
-# this is windows use the .zip and .exe extentions for the files.
+# this is windows use the .zip and .exe extensions for the files.
 s3Os=Windows
 zipExt=".zip"
 binaryExt=".exe"
@@ -79,7 +79,7 @@ This specification uses the following terms:
 <code>sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9</code>.
 Since the configuration JSON that gets hashed references hashes of each
 layer in the image, this formulation of the ImageID makes images
-content-addresable.
+content-addressable.
 </dd>
 <dt>
 Tag
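The spec text quoted above defines an ImageID as the SHA-256 digest of the image's configuration JSON, which in turn references each layer's digest, making the ID content-addressable. A minimal sketch of that derivation (the `config.json` path is hypothetical):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"os"
)

func main() {
	// Read a serialized image configuration; because it embeds the digests
	// of every layer, hashing it yields a content-addressable ImageID.
	configJSON, err := os.ReadFile("config.json") // hypothetical path
	if err != nil {
		panic(err)
	}
	sum := sha256.Sum256(configJSON)
	fmt.Println("sha256:" + hex.EncodeToString(sum[:]))
}
```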
@@ -79,7 +79,7 @@ This specification uses the following terms:
 <code>sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9</code>.
 Since the configuration JSON that gets hashed references hashes of each
 layer in the image, this formulation of the ImageID makes images
-content-addresable.
+content-addressable.
 </dd>
 <dt>
 Tag
@@ -268,7 +268,7 @@ func (s *DockerSuite) TestCpToSymlinkToDirectory(c *check.C) {

 containerID := strings.TrimSpace(out)

-// Create a temp directory to hold a test file nested in a direcotry.
+// Create a temp directory to hold a test file nested in a directory.
 testDir, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-")
 c.Assert(err, checker.IsNil)
 defer os.RemoveAll(testDir)
@@ -4201,7 +4201,7 @@ func (s *DockerSuite) TestRunVolumesMountedAsSlave(c *check.C) {
 }

 // Prepare a source directory with file in it. We will bind mount this
-// direcotry and see if file shows up.
+// directory and see if file shows up.
 tmpDir2, err := ioutil.TempDir("", "volume-source2")
 if err != nil {
 c.Fatal(err)
@@ -151,7 +151,7 @@ two memory nodes.
 Limit the containers Real Time CPU usage. This flag tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. Ex:
 Period of 1,000,000us and Runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks.

-The sum of all runtimes across containers cannot exceed the amount alotted to the parent cgroup.
+The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup.

 **--device**=[]
 Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)
@@ -205,7 +205,7 @@ to the quota you specify.
 Limit the containers Real Time CPU usage. This flag tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. Ex:
 Period of 1,000,000us and Runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks.

-The sum of all runtimes across containers cannot exceed the amount alotted to the parent cgroup.
+The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup.

 **-d**, **--detach**=*true*|*false*
 Detached mode: run the container in the background and print the new container ID. The default is *false*.
@@ -62,7 +62,7 @@ a running container with kernel memory initialized.
 Limit the containers Real Time CPU usage. This flag tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. Ex:
 Period of 1,000,000us and Runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks.

-The sum of all runtimes across containers cannot exceed the amount alotted to the parent cgroup.
+The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup.

 **--cpuset-cpus**=""
 CPUs in which to allow execution (0-3, 0,1)
@@ -308,7 +308,7 @@ func (ta *tarAppender) addTarFile(path, name string) error {
 }

 // if it's not a directory and has more than 1 link,
-// it's hardlinked, so set the type flag accordingly
+// it's hard linked, so set the type flag accordingly
 if !fi.IsDir() && hasHardlinks(fi) {
 // a link should have a name that it links too
 // and that linked name should be first in the tar archive
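The comment fixed in this hunk describes the hard-link rule: a non-directory whose link count is greater than one is recorded as a link entry pointing at the first archive name seen for that inode. A rough, Linux-only sketch of that check; the `firstName` map and the example path are simplified stand-ins for the real bookkeeping, not the actual tarAppender code.

```go
package main

import (
	"archive/tar"
	"fmt"
	"os"
	"syscall"
)

// firstName remembers the first archive name seen for each inode, so later
// hard links can point back at it, as the comment above describes.
var firstName = map[uint64]string{}

func headerFor(path, name string) (*tar.Header, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return nil, err
	}
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		return nil, err
	}
	hdr.Name = name

	// Linux-only: inspect the link count via the underlying stat data.
	if st, ok := fi.Sys().(*syscall.Stat_t); ok && !fi.IsDir() && st.Nlink > 1 {
		if target, seen := firstName[st.Ino]; seen {
			hdr.Typeflag = tar.TypeLink // store as a hard-link entry
			hdr.Linkname = target
			hdr.Size = 0
		} else {
			firstName[st.Ino] = name // first occurrence is stored as a regular file
		}
	}
	return hdr, nil
}

func main() {
	hdr, err := headerFor("/etc/hostname", "hostname") // hypothetical input
	if err != nil {
		panic(err)
	}
	fmt.Println(hdr.Name, hdr.Typeflag)
}
```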