Mirror of https://github.com/moby/moby.git
modernize: Use range int
Added in Go 1.22

Signed-off-by: Paweł Gronowski <pawel.gronowski@docker.com>
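For context, the rewrite relies on Go 1.22's "range over int" form: "for i := range n" counts i from 0 through n-1, and the index variable can be dropped entirely when the body never uses it. The snippet below is a minimal standalone sketch of the two forms used throughout this diff (illustration only, not part of the commit); it assumes any Go 1.22+ toolchain:

package main

import "fmt"

func main() {
	// Go 1.22+: ranging over an integer n yields 0, 1, ..., n-1,
	// equivalent to the classic three-clause loop for i := 0; i < n; i++.
	for i := range 3 {
		fmt.Println("index used:", i)
	}

	// When the loop body never uses the index, the variable is dropped.
	for range 3 {
		fmt.Println("index unused")
	}
}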
@@ -33,7 +33,7 @@ func (s *DockerBenchmarkSuite) BenchmarkConcurrentContainerActions(c *testing.B)
 	outerGroup.Add(maxConcurrency)
 	chErr := make(chan error, numIterations*2*maxConcurrency)
 
-	for i := 0; i < maxConcurrency; i++ {
+	for range maxConcurrency {
 		go func() {
 			defer outerGroup.Done()
 			innerGroup := &sync.WaitGroup{}
@@ -41,7 +41,7 @@ func (s *DockerBenchmarkSuite) BenchmarkConcurrentContainerActions(c *testing.B)
 
 		go func() {
 			defer innerGroup.Done()
-			for i := 0; i < numIterations; i++ {
+			for range numIterations {
 				args := []string{"run", "-d", "busybox"}
 				args = append(args, sleepCommandForDaemonPlatform()...)
 				out, _, err := dockerCmdWithError(args...)
@@ -88,7 +88,7 @@ func (s *DockerBenchmarkSuite) BenchmarkConcurrentContainerActions(c *testing.B)
 
 		go func() {
 			defer innerGroup.Done()
-			for i := 0; i < numIterations; i++ {
+			for range numIterations {
 				out, _, err := dockerCmdWithError("ps")
 				if err != nil {
 					chErr <- errors.New(out)
@@ -200,7 +200,7 @@ func (d *Daemon) CmdRetryOutOfSequence(args ...string) (string, error) {
 		err error
 	)
 
-	for i := 0; i < 10; i++ {
+	for range 10 {
 		output, err = d.Cmd(args...)
 		// error, no error, whatever. if we don't have "update out of
 		// sequence", we don't retry, we just return.
@@ -92,7 +92,7 @@ func (s *DockerAPISuite) TestExecResizeImmediatelyAfterExecStart(c *testing.T) {
 		ch = make(chan error, n)
 		wg sync.WaitGroup
 	)
-	for i := 0; i < n; i++ {
+	for range n {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
@@ -125,7 +125,7 @@ func (s *DockerAPISuite) TestLogsAPIUntilFutureFollow(c *testing.T) {
 	go func() {
 		bufReader := bufio.NewReader(reader)
 		defer reader.Close()
-		for i := 0; i < untilSecs; i++ {
+		for range untilSecs {
 			out, _, err := bufReader.ReadLine()
 			if err != nil {
 				if err == io.EOF {
@@ -148,7 +148,7 @@ func (s *DockerAPISuite) TestLogsAPIUntilFutureFollow(c *testing.T) {
 		}
 	}()
 
-	for i := 0; i < untilSecs; i++ {
+	for range untilSecs {
 		select {
 		case l := <-chLog:
 			assert.NilError(c, l.err)
@@ -149,7 +149,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *testing.T) {
 	ctx := testutil.GetContext(c)
 	const nodeCount = 3
 	var daemons [nodeCount]*daemon.Daemon
-	for i := 0; i < nodeCount; i++ {
+	for i := range nodeCount {
 		daemons[i] = s.AddDaemon(ctx, c, true, i == 0)
 	}
 	// wait for nodes ready
@@ -305,7 +305,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *testing.T) {
 	ctx := testutil.GetContext(c)
 	const nodeCount = 3
 	var daemons [nodeCount]*daemon.Daemon
-	for i := 0; i < nodeCount; i++ {
+	for i := range nodeCount {
 		daemons[i] = s.AddDaemon(ctx, c, true, i == 0)
 	}
 	// wait for nodes ready
@@ -344,7 +344,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *testing.T) {
 	ctx := testutil.GetContext(c)
 	const nodeCount = 3
 	var daemons [nodeCount]*daemon.Daemon
-	for i := 0; i < nodeCount; i++ {
+	for i := range nodeCount {
 		daemons[i] = s.AddDaemon(ctx, c, true, i == 0)
 	}
 	// wait for nodes ready
@@ -397,7 +397,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *testing.T) {
 	ctx := testutil.GetContext(c)
 	const nodeCount = 3
 	var daemons [nodeCount]*daemon.Daemon
-	for i := 0; i < nodeCount; i++ {
+	for i := range nodeCount {
 		daemons[i] = s.AddDaemon(ctx, c, true, i == 0)
 	}
 	// wait for nodes ready
@@ -494,7 +494,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServicePlacementPrefs(c *testing.T) {
 
 	const nodeCount = 3
 	var daemons [nodeCount]*daemon.Daemon
-	for i := 0; i < nodeCount; i++ {
+	for i := range nodeCount {
 		daemons[i] = s.AddDaemon(ctx, c, true, i == 0)
 	}
 	// wait for nodes ready
@@ -816,7 +816,7 @@ func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *testing.T) {
 	mCount, wCount := 5, 1
 
 	var nodes []*daemon.Daemon
-	for i := 0; i < mCount; i++ {
+	for range mCount {
 		manager := s.AddDaemon(ctx, c, true, true)
 		info := manager.SwarmInfo(ctx, c)
 		assert.Equal(c, info.ControlAvailable, true)
@@ -824,7 +824,7 @@ func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *testing.T) {
 		nodes = append(nodes, manager)
 	}
 
-	for i := 0; i < wCount; i++ {
+	for range wCount {
 		worker := s.AddDaemon(ctx, c, true, false)
 		info := worker.SwarmInfo(ctx, c)
 		assert.Equal(c, info.ControlAvailable, false)
@@ -960,7 +960,7 @@ func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *testing.T) {
 	currentTrustRoot := info.Cluster.TLSInfo.TrustRoot
 
 	// rotate multiple times
-	for i := 0; i < 4; i++ {
+	for i := range 4 {
 		var err error
 		var cert, key []byte
 		if i%2 != 0 {
@@ -980,7 +980,7 @@ func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *testing.T) {
 
 		// poll to make sure update succeeds
 		var clusterTLSInfo swarm.TLSInfo
-		for j := 0; j < 18; j++ {
+		for range 18 {
 			info := m.SwarmInfo(ctx, c)
 
 			// the desired CA cert and key is always redacted
@@ -1002,7 +1002,7 @@ func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *testing.T) {
 		}
 		// could take another second or two for the nodes to trust the new roots after they've all gotten
 		// new TLS certificates
-		for j := 0; j < 18; j++ {
+		for range 18 {
 			mInfo := m.GetNode(ctx, c, m.NodeID()).Description.TLSInfo
 			wInfo := m.GetNode(ctx, c, w.NodeID()).Description.TLSInfo
 
@@ -54,7 +54,7 @@ func (s *DockerCLIAttachSuite) TestAttachMultipleAndRestart(c *testing.T) {
 		close(startDone)
 	}()
 
-	for i := 0; i < 3; i++ {
+	for range 3 {
 		go func() {
 			cmd := exec.Command(dockerBinary, "attach", "attacher")
 
@@ -1590,8 +1590,8 @@ func (s *DockerCLIBuildSuite) TestBuildExposeMorePorts(c *testing.T) {
 	portList := make([]string, 50)
 	line := make([]string, 100)
 	expectedPorts := make([]int, len(portList)*len(line))
-	for i := 0; i < len(portList); i++ {
-		for j := 0; j < len(line); j++ {
+	for i := range portList {
+		for j := range line {
 			p := i*len(line) + j + 1
 			line[j] = strconv.Itoa(p)
 			expectedPorts[p-1] = p
@@ -1127,7 +1127,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartContainerLinksRestart(c *testing.T)
 	maxChildren := 10
 	chErr := make(chan error, maxChildren)
 
-	for i := 0; i < maxChildren; i++ {
+	for i := range maxChildren {
 		wg.Add(1)
 		name := fmt.Sprintf("test%d", i)
 
@@ -283,7 +283,7 @@ func (s *DockerCLIExecSuite) TestExecCgroup(c *testing.T) {
 	var execCgroups []sort.StringSlice
 	errChan := make(chan error, 5)
 	// exec a few times concurrently to get consistent failure
-	for i := 0; i < 5; i++ {
+	for range 5 {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
@@ -337,7 +337,7 @@ func (s *DockerCLIExecSuite) TestExecInspectID(c *testing.T) {
 
 	// Give the exec 10 chances/seconds to start then give up and stop the test
 	tries := 10
-	for i := 0; i < tries; i++ {
+	for i := range tries {
 		// Since its still running we should see exec as part of the container
 		out = strings.TrimSpace(inspectField(c, id, "ExecIDs"))
 
@@ -360,7 +360,7 @@ func (s *DockerCLIExecSuite) TestExecInspectID(c *testing.T) {
 	cmd.Wait()
 
 	// Give the exec 10 chances/seconds to stop then give up and stop the test
-	for i := 0; i < tries; i++ {
+	for i := range tries {
 		// Since its still running we should see exec as part of the container
 		out = strings.TrimSpace(inspectField(c, id, "ExecIDs"))
 
@@ -547,7 +547,7 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverCapabilities(c *test
 	s.d.Start(c)
 	assert.Equal(c, s.ec.caps, 0)
 
-	for i := 0; i < 3; i++ {
+	for i := range 3 {
 		out, err := s.d.Cmd("volume", "create", "-d", volumePluginName, fmt.Sprintf("test%d", i))
 		assert.NilError(c, err, out)
 		assert.Equal(c, s.ec.caps, 1)
@@ -62,7 +62,7 @@ LABEL label.Z="Z"`))
 	actualValues := strings.Split(out, "\n")[1:27]
 	expectedValues := [26]string{"Z", "Y", "X", "W", "V", "U", "T", "S", "R", "Q", "P", "O", "N", "M", "L", "K", "J", "I", "H", "G", "F", "E", "D", "C", "B", "A"}
 
-	for i := 0; i < 26; i++ {
+	for i := range 26 {
 		echoValue := fmt.Sprintf("LABEL label.%s=%s", expectedValues[i], expectedValues[i])
 		actualValue := actualValues[i]
 		assert.Assert(c, is.Contains(actualValue, echoValue))
@@ -1123,7 +1123,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c
 	s.d.StartWithBusybox(ctx, c)
 
 	// Run a few containers on host network
-	for i := 0; i < 10; i++ {
+	for i := range 10 {
 		cName := fmt.Sprintf("hostc-%d", i)
 		out, err := s.d.Cmd("run", "-d", "--name", cName, "--net=host", "--restart=always", "busybox", "top")
 		assert.NilError(c, err, out)
@@ -1138,7 +1138,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c
 	s.d.Start(c)
 
 	// make sure all the containers are up and running
-	for i := 0; i < 10; i++ {
+	for i := range 10 {
 		err := s.d.WaitRun(fmt.Sprintf("hostc-%d", i))
 		assert.NilError(c, err)
 	}
@@ -104,7 +104,7 @@ func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
 	testRange := func() {
 		// host port ranges used
 		IDs := make([]string, 3)
-		for i := 0; i < 3; i++ {
+		for i := range 3 {
 			out = cli.DockerCmd(c, "run", "-d", "-p", "9090-9092:80", "busybox", "top").Stdout()
 			IDs[i] = strings.TrimSpace(out)
 
@@ -121,7 +121,7 @@ func (s *DockerCLIPortSuite) TestPortList(c *testing.T) {
 		// Exhausted port range did not return an error
 		assert.Assert(c, err != nil, "out: %s", out)
 
-		for i := 0; i < 3; i++ {
+		for i := range 3 {
 			cli.DockerCmd(c, "rm", "-f", IDs[i])
 		}
 	}
@@ -182,7 +182,7 @@ func assertPortList(t *testing.T, out string, expected []string) {
 		return old
 	}
 
-	for i := 0; i < len(expected); i++ {
+	for i := range expected {
 		if lines[i] == expected[i] {
 			continue
 		}
@@ -136,7 +136,7 @@ func assertContainerList(out string, expected []string) bool {
 	}
 
 	containerIDIndex := strings.Index(lines[0], "CONTAINER ID")
-	for i := 0; i < len(expected); i++ {
+	for i := range expected {
 		foundID := lines[i+1][containerIDIndex : containerIDIndex+12]
 		if foundID != expected[i][:12] {
 			return false
@@ -806,7 +806,7 @@ func (s *DockerCLIRunSuite) TestRunTwoConcurrentContainers(c *testing.T) {
 	group.Add(2)
 
 	errChan := make(chan error, 2)
-	for i := 0; i < 2; i++ {
+	for range 2 {
 		go func() {
 			defer group.Done()
 			_, _, err := dockerCmdWithError("run", "busybox", "sleep", sleepTime)
@@ -2857,7 +2857,7 @@ func (s *DockerCLIRunSuite) TestRunUnshareProc(c *testing.T) {
 	}()
 
 	var retErr error
-	for i := 0; i < 3; i++ {
+	for range 3 {
 		err := <-errChan
 		if retErr == nil && err != nil {
 			retErr = err
@@ -4111,7 +4111,7 @@ func (s *DockerCLIRunSuite) TestSlowStdinClosing(c *testing.T) {
 		skip.If(c, testEnv.GitHubActions())
 	}
 	const repeat = 3 // regression happened 50% of the time
-	for i := 0; i < repeat; i++ {
+	for i := range repeat {
 		c.Run(strconv.Itoa(i), func(c *testing.T) {
 			cmd := icmd.Cmd{
 				Command: []string{dockerBinary, "run", "--rm", "-i", "busybox", "cat"},
@@ -203,7 +203,7 @@ func (s *DockerSwarmSuite) TestServiceLogsFollow(c *testing.T) {
 		}
 	}()
 
-	for i := 0; i < 3; i++ {
+	for range 3 {
 		msg := <-ch
 		assert.NilError(c, msg.err)
 		assert.Assert(c, is.Contains(string(msg.data), "log test"))
@@ -1486,7 +1486,7 @@ func (s *DockerSwarmSuite) TestSwarmAlternateLockUnlock(c *testing.T) {
 	ctx := testutil.GetContext(c)
 	d := s.AddDaemon(ctx, c, true, true)
 
-	for i := 0; i < 2; i++ {
+	for range 2 {
 		// set to lock
 		outs, err := d.Cmd("swarm", "update", "--autolock")
 		assert.Assert(c, err == nil, "out: %v", outs)
@@ -1790,7 +1790,7 @@ func (s *DockerSwarmSuite) TestSwarmJoinLeave(c *testing.T) {
 
 	// Verify that back to back join/leave does not cause panics
 	d1 := s.AddDaemon(ctx, c, false, false)
-	for i := 0; i < 10; i++ {
+	for range 10 {
 		out, err = d1.Cmd("swarm", "join", "--token", token, d.SwarmListenAddr())
 		assert.NilError(c, err)
 		assert.Assert(c, strings.TrimSpace(out) != "")
@@ -1808,7 +1808,7 @@ func waitForEvent(t *testing.T, d *daemon.Daemon, since string, filter string, e
 		return ""
 	}
 	var out string
-	for i := 0; i < retry; i++ {
+	for i := range retry {
 		until := daemonUnixTime(t)
 		var err error
 		if filter != "" {