modernize: Use min built-in

Signed-off-by: Paweł Gronowski <pawel.gronowski@docker.com>
This commit is contained in:
Paweł Gronowski
2025-12-15 18:25:30 +01:00
parent 39c19d9161
commit e548a31d28
11 changed files with 13 additions and 52 deletions

View File

@@ -63,10 +63,7 @@ func TestV2MetadataService(t *testing.T) {
if err != nil {
t.Fatalf("error calling Get: %v", err)
}
-expectedMetadataEntries := len(vec.metadata)
-if expectedMetadataEntries > 50 {
-expectedMetadataEntries = 50
-}
+expectedMetadataEntries := min(len(vec.metadata), 50)
if !reflect.DeepEqual(metadata, vec.metadata[len(vec.metadata)-expectedMetadataEntries:len(vec.metadata)]) {
t.Fatal("Get returned incorrect layer ID")
}

View File

@@ -81,10 +81,7 @@ func PrefixCompare(a, b netip.Prefix) int {
// PrefixAfter returns the prefix of size 'sz' right after 'prev'.
func PrefixAfter(prev netip.Prefix, sz int) netip.Prefix {
-s := sz
-if prev.Bits() < sz {
-s = prev.Bits()
-}
+s := min(prev.Bits(), sz)
addr := ipbits.Add(prev.Addr(), 1, uint(prev.Addr().BitLen()-s))
if addr.IsUnspecified() {
return netip.Prefix{}

View File

@@ -96,10 +96,7 @@ loop0:
}
// add new byte slice to the buffers slice and continue writing
-nextCap := b.Cap() * 2
-if nextCap > maxCap {
-nextCap = maxCap
-}
+nextCap := min(b.Cap()*2, maxCap)
bp.buf = append(bp.buf, getBuffer(nextCap))
}
bp.wait.Broadcast()

View File

@@ -16,10 +16,7 @@ func compare(v1, v2 string) int {
otherTab = strings.Split(v2, ".")
)
-maxVer := len(currTab)
-if len(otherTab) > maxVer {
-maxVer = len(otherTab)
-}
+maxVer := max(len(otherTab), len(currTab))
for i := 0; i < maxVer; i++ {
var currInt, otherInt int

View File

@@ -203,10 +203,7 @@ func (as *AddrSet) AddrsInPrefix(prefix netip.Prefix) (hi, lo uint64) {
}
func (as *AddrSet) getBitmap(addr netip.Addr) (*bitmap.Bitmap, netip.Prefix, error) {
-bits := as.pool.Addr().BitLen() - as.pool.Bits()
-if bits > maxBitsPerBitmap {
-bits = maxBitsPerBitmap
-}
+bits := min(as.pool.Addr().BitLen()-as.pool.Bits(), maxBitsPerBitmap)
bmKey, err := addr.Prefix(as.pool.Addr().BitLen() - bits)
if err != nil {
return nil, netip.Prefix{}, err
@@ -220,10 +217,7 @@ func (as *AddrSet) getBitmap(addr netip.Addr) (*bitmap.Bitmap, netip.Prefix, err
}
func (as *AddrSet) addrsPerBitmap() uint64 {
-bits := as.pool.Addr().BitLen() - as.pool.Bits()
-if bits > maxBitsPerBitmap {
-bits = maxBitsPerBitmap
-}
+bits := min(as.pool.Addr().BitLen()-as.pool.Bits(), maxBitsPerBitmap)
return uint64(1) << bits
}

View File

@@ -66,10 +66,7 @@ func (sbs *sbState) Index() uint64 {
return sbs.dbIndex
}
-maxIndex := sb.dbIndex
-if sbs.dbIndex > maxIndex {
-maxIndex = sbs.dbIndex
-}
+maxIndex := max(sbs.dbIndex, sb.dbIndex)
return maxIndex
}

View File

@@ -134,10 +134,7 @@ func (daemon *Daemon) Containers(ctx context.Context, config *backend.ContainerL
// dispatch a set number of worker goroutines to do the jobs. We choose
// log2(numContainers) workers to avoid creating too many goroutines
// for large number of containers.
-numWorkers := int(math.Log2(float64(numContainers)))
-if numWorkers < 1 {
-numWorkers = 1
-}
+numWorkers := max(int(math.Log2(float64(numContainers))), 1)
resultsMut := sync.Mutex{}
results := make([]containertypes.Summary, numContainers)

View File

@@ -78,10 +78,7 @@ func (c *Copier) copySrc(name string, src io.Reader) {
return
default:
// Work out how much more data we are okay with reading this time.
-upto := n + readSize
-if upto > cap(buf) {
-upto = cap(buf)
-}
+upto := min(n+readSize, cap(buf))
// Try to read that data.
if upto > n {
read, err := src.Read(buf[n:upto])

View File

@@ -421,10 +421,7 @@ func (l *splunkLogger) postMessages(messages []*splunkMessage, lastChance bool)
messagesLen := len(messages)
for i := 0; i < messagesLen; i += l.postMessagesBatchSize {
-upperBound := i + l.postMessagesBatchSize
-if upperBound > messagesLen {
-upperBound = messagesLen
-}
+upperBound := min(i+l.postMessagesBatchSize, messagesLen)
if err := l.tryPostMessages(ctx, messages[i:upperBound]); err != nil {
log.G(ctx).WithError(err).WithField("module", "logger/splunk").Warn("Error while sending logs")

View File

@@ -103,10 +103,7 @@ func NewTailReaderWithDelimiter(ctx context.Context, r SizeReaderAt, reqLines in
func newScanner(r SizeReaderAt, delim []byte) *scanner {
size := r.Size()
-readSize := blockSize
-if readSize > int(size) {
-readSize = int(size)
-}
+readSize := min(blockSize, int(size))
// silly case...
if len(delim) >= readSize/2 {
readSize = len(delim)*2 + 2
@@ -178,10 +175,7 @@ func (s *scanner) Scan(ctx context.Context) bool {
idx := s.idx - len(s.delim)
if idx < 0 {
-readSize := int(s.pos)
-if readSize > len(s.buf) {
-readSize = len(s.buf)
-}
+readSize := min(int(s.pos), len(s.buf))
if readSize < len(s.delim) {
return false

View File

@@ -210,10 +210,7 @@ func TestNewTailReader(t *testing.T) {
test := test
t.Parallel()
-maxLen := len(test.data)
-if maxLen > 10 {
-maxLen = 10
-}
+maxLen := min(len(test.data), 10)
s := strings.Join(test.data, string(delim))
if len(test.data) > 0 {