libnetwork/d/overlay: un-embed mutexes

It is easier to find all references to the mutexes when they are named
struct fields rather than embedded structs.
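
As an illustration (not part of the changed code), here is a minimal,
self-contained Go sketch of the two patterns; embeddedMap and fieldMap are
hypothetical stand-ins for structs such as encrMap or driver below:

package main

import (
	"fmt"
	"sync"
)

// embeddedMap embeds sync.Mutex: call sites read e.Lock()/e.Unlock(), so
// finding every use of the mutex means wading through all Lock/Unlock
// calls, and the Lock/Unlock methods leak into the type's method set.
type embeddedMap struct {
	nodes map[string]int
	sync.Mutex
}

// fieldMap names the mutex: every use must spell out f.mu, so a plain
// grep or an IDE reference search on the field finds all lock sites.
type fieldMap struct {
	nodes map[string]int
	mu    sync.Mutex
}

func main() {
	e := &embeddedMap{nodes: map[string]int{}}
	e.Lock()
	e.nodes["a"]++
	e.Unlock()

	f := &fieldMap{nodes: map[string]int{}}
	f.mu.Lock()
	f.nodes["a"]++
	f.mu.Unlock()

	fmt.Println(e.nodes, f.nodes)
}

The diffs below apply exactly this renaming to encrMap, network, driver,
peerMap, and peerNetworkMap.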

Signed-off-by: Cory Snider <csnider@mirantis.com>
(cherry picked from commit 74713e1a7d)
Signed-off-by: Cory Snider <csnider@mirantis.com>
Author: Cory Snider
Date:   2025-05-22 19:01:07 -04:00
Parent: e604d70e22
Commit: 480dfaef06
5 changed files with 56 additions and 56 deletions

@@ -97,12 +97,12 @@ type encrNode struct {
 type encrMap struct {
 	nodes map[netip.Addr]encrNode
-	sync.Mutex
+	mu sync.Mutex
 }
 
 func (e *encrMap) String() string {
-	e.Lock()
-	defer e.Unlock()
+	e.mu.Lock()
+	defer e.mu.Unlock()
 	b := new(bytes.Buffer)
 	for k, v := range e.nodes {
 		b.WriteString("\n")
@@ -152,12 +152,12 @@ func (d *driver) setupEncryption(remoteIP netip.Addr) error {
 		}
 	}
 
-	d.secMap.Lock()
+	d.secMap.mu.Lock()
 	node := d.secMap.nodes[remoteIP]
 	node.spi = indices
 	node.count++
 	d.secMap.nodes[remoteIP] = node
-	d.secMap.Unlock()
+	d.secMap.mu.Unlock()
 	return nil
 }
@@ -166,8 +166,8 @@ func (d *driver) removeEncryption(remoteIP netip.Addr) error {
 	log.G(context.TODO()).Debugf("removeEncryption(%s)", remoteIP)
 	spi := func() []spi {
-		d.secMap.Lock()
-		defer d.secMap.Unlock()
+		d.secMap.mu.Lock()
+		defer d.secMap.mu.Unlock()
 		node := d.secMap.nodes[remoteIP]
 		if node.count == 1 {
 			delete(d.secMap.nodes, remoteIP)
@@ -451,7 +451,7 @@ func buildAeadAlgo(k *key, s int) *netlink.XfrmStateAlgo {
 }
 
 func (d *driver) secMapWalk(f func(netip.Addr, []spi) ([]spi, bool)) error {
-	d.secMap.Lock()
+	d.secMap.mu.Lock()
 	for rIP, node := range d.secMap.nodes {
 		idxs, stop := f(rIP, node.spi)
 		if idxs != nil {
@@ -461,7 +461,7 @@ func (d *driver) secMapWalk(f func(netip.Addr, []spi) ([]spi, bool)) error {
 			break
 		}
 	}
-	d.secMap.Unlock()
+	d.secMap.mu.Unlock()
 	return nil
 }
@@ -469,10 +469,10 @@ func (d *driver) setKeys(keys []*key) error {
 	// Remove any stale policy, state
 	clearEncryptionStates()
 	// Accept the encryption keys and clear any stale encryption map
-	d.Lock()
+	d.mu.Lock()
 	d.keys = keys
 	d.secMap = &encrMap{nodes: map[netip.Addr]encrNode{}}
-	d.Unlock()
+	d.mu.Unlock()
 	log.G(context.TODO()).Debugf("Initial encryption keys: %v", keys)
 	return nil
 }
@@ -492,8 +492,8 @@ func (d *driver) updateKeys(newKey, primary, pruneKey *key) error {
 		aIP = d.advertiseAddress
 	)
 
-	d.Lock()
-	defer d.Unlock()
+	d.mu.Lock()
+	defer d.mu.Unlock()
 
 	// add new
 	if newKey != nil {

@@ -26,22 +26,22 @@ type endpoint struct {
 }
 
 func (n *network) endpoint(eid string) *endpoint {
-	n.Lock()
-	defer n.Unlock()
+	n.mu.Lock()
+	defer n.mu.Unlock()
 	return n.endpoints[eid]
 }
 
 func (n *network) addEndpoint(ep *endpoint) {
-	n.Lock()
+	n.mu.Lock()
 	n.endpoints[ep.id] = ep
-	n.Unlock()
+	n.mu.Unlock()
 }
 
 func (n *network) deleteEndpoint(eid string) {
-	n.Lock()
+	n.mu.Lock()
 	delete(n.endpoints, eid)
-	n.Unlock()
+	n.mu.Unlock()
 }
 
 func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, epOptions map[string]interface{}) error {

@@ -62,7 +62,7 @@ type network struct {
 	subnets []*subnet
 	secure bool
 	mtu int
-	sync.Mutex
+	mu sync.Mutex
 }
 
 func init() {
@@ -150,8 +150,8 @@ func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo d
 		n.subnets = append(n.subnets, s)
 	}
 
-	d.Lock()
-	defer d.Unlock()
+	d.mu.Lock()
+	defer d.mu.Unlock()
 	if d.networks[n.id] != nil {
 		return fmt.Errorf("attempt to create overlay network %v that already exists", n.id)
 	}
@@ -185,12 +185,12 @@ func (d *driver) DeleteNetwork(nid string) error {
 		return err
 	}
 
-	d.Lock()
+	d.mu.Lock()
 	// Only perform a peer flush operation (if required) AFTER unlocking
 	// the driver lock to avoid deadlocking w/ the peerDB.
 	var doPeerFlush bool
 	defer func() {
-		d.Unlock()
+		d.mu.Unlock()
 		if doPeerFlush {
 			d.peerFlush(nid)
 		}
@@ -251,14 +251,14 @@ func (n *network) joinSandbox(s *subnet, incJoinCount bool) error {
 	// the other will wait.
 	networkOnce.Do(populateVNITbl)
 
-	n.Lock()
+	n.mu.Lock()
 	// If initialization was successful then tell the peerDB to initialize the
 	// sandbox with all the peers previously received from networkdb. But only
 	// do this after unlocking the network. Otherwise we could deadlock with
 	// on the peerDB channel while peerDB is waiting for the network lock.
 	var doInitPeerDB bool
 	defer func() {
-		n.Unlock()
+		n.mu.Unlock()
 		if doInitPeerDB {
 			go n.driver.initSandboxPeerDB(n.id)
 		}
@@ -296,8 +296,8 @@ func (n *network) joinSandbox(s *subnet, incJoinCount bool) error {
 }
 
 func (n *network) leaveSandbox() {
-	n.Lock()
-	defer n.Unlock()
+	n.mu.Lock()
+	defer n.mu.Unlock()
 	n.joinCnt--
 	if n.joinCnt != 0 {
 		return
@@ -603,16 +603,16 @@ func (n *network) initSandbox() error {
 }
 
 func (d *driver) network(nid string) *network {
-	d.Lock()
+	d.mu.Lock()
 	n := d.networks[nid]
-	d.Unlock()
+	d.mu.Unlock()
 	return n
 }
 
 func (n *network) sandbox() *osl.Namespace {
-	n.Lock()
-	defer n.Unlock()
+	n.mu.Lock()
+	defer n.mu.Unlock()
 	return n.sbox
 }

@@ -37,7 +37,7 @@ type driver struct {
 	initOS sync.Once
 	keys []*key
 	peerOpMu sync.Mutex
-	sync.Mutex
+	mu sync.Mutex
 }
 
 // Register registers a new instance of the overlay driver.
@@ -90,10 +90,10 @@ func (d *driver) nodeJoin(data discoverapi.NodeDiscoveryData) error {
 		if !advAddr.IsValid() {
 			return fmt.Errorf("invalid discovery data")
 		}
-		d.Lock()
+		d.mu.Lock()
 		d.advertiseAddress = advAddr
 		d.bindAddress = bindAddr
-		d.Unlock()
+		d.mu.Unlock()
 	}
 	return nil
 }

@@ -31,33 +31,33 @@ func (p *peerEntry) isLocal() bool {
 type peerMap struct {
 	mp setmatrix.SetMatrix[netip.Prefix, peerEntry]
-	sync.Mutex
+	mu sync.Mutex
 }
 
 type peerNetworkMap struct {
 	// map with key peerKey
 	mp map[string]*peerMap
-	sync.Mutex
+	mu sync.Mutex
 }
 
 func (d *driver) peerDbNetworkWalk(nid string, f func(netip.Prefix, peerEntry) bool) {
-	d.peerDb.Lock()
+	d.peerDb.mu.Lock()
 	pMap, ok := d.peerDb.mp[nid]
-	d.peerDb.Unlock()
+	d.peerDb.mu.Unlock()
 	if !ok {
 		return
 	}
 	mp := map[netip.Prefix]peerEntry{}
-	pMap.Lock()
+	pMap.mu.Lock()
 	for _, pKey := range pMap.mp.Keys() {
 		entryDBList, ok := pMap.mp.Get(pKey)
 		if ok {
 			mp[pKey] = entryDBList[0]
 		}
 	}
-	pMap.Unlock()
+	pMap.mu.Unlock()
 	for k, v := range mp {
 		if f(k, v) {
@@ -67,15 +67,15 @@ func (d *driver) peerDbNetworkWalk(nid string, f func(netip.Prefix, peerEntry) b
 }
 
 func (d *driver) peerDbGet(nid string, peerIP netip.Prefix) (peerEntry, bool) {
-	d.peerDb.Lock()
+	d.peerDb.mu.Lock()
 	pMap, ok := d.peerDb.mp[nid]
-	d.peerDb.Unlock()
+	d.peerDb.mu.Unlock()
 	if !ok {
 		return peerEntry{}, false
 	}
 
-	pMap.Lock()
-	defer pMap.Unlock()
+	pMap.mu.Lock()
+	defer pMap.mu.Unlock()
 	c, _ := pMap.mp.Get(peerIP)
 	if len(c) == 0 {
 		return peerEntry{}, false
@@ -84,13 +84,13 @@ func (d *driver) peerDbGet(nid string, peerIP netip.Prefix) (peerEntry, bool) {
 }
 
 func (d *driver) peerDbAdd(nid, eid string, peerIP netip.Prefix, peerMac net.HardwareAddr, vtep netip.Addr) (bool, int) {
-	d.peerDb.Lock()
+	d.peerDb.mu.Lock()
 	pMap, ok := d.peerDb.mp[nid]
 	if !ok {
 		pMap = &peerMap{}
 		d.peerDb.mp[nid] = pMap
 	}
-	d.peerDb.Unlock()
+	d.peerDb.mu.Unlock()
 
 	pEntry := peerEntry{
 		eid: eid,
@@ -98,8 +98,8 @@ func (d *driver) peerDbAdd(nid, eid string, peerIP netip.Prefix, peerMac net.Har
 		vtep: vtep,
 	}
 
-	pMap.Lock()
-	defer pMap.Unlock()
+	pMap.mu.Lock()
+	defer pMap.mu.Unlock()
 	b, i := pMap.mp.Insert(peerIP, pEntry)
 	if i != 1 {
 		// Transient case, there is more than one endpoint that is using the same IP
@@ -111,13 +111,13 @@ func (d *driver) peerDbAdd(nid, eid string, peerIP netip.Prefix, peerMac net.Har
 }
 
 func (d *driver) peerDbDelete(nid, eid string, peerIP netip.Prefix, peerMac net.HardwareAddr, vtep netip.Addr) (bool, int) {
-	d.peerDb.Lock()
+	d.peerDb.mu.Lock()
 	pMap, ok := d.peerDb.mp[nid]
 	if !ok {
-		d.peerDb.Unlock()
+		d.peerDb.mu.Unlock()
 		return false, 0
 	}
-	d.peerDb.Unlock()
+	d.peerDb.mu.Unlock()
 
 	pEntry := peerEntry{
 		eid: eid,
@@ -125,8 +125,8 @@ func (d *driver) peerDbDelete(nid, eid string, peerIP netip.Prefix, peerMac net.
 		vtep: vtep,
 	}
 
-	pMap.Lock()
-	defer pMap.Unlock()
+	pMap.mu.Lock()
+	defer pMap.mu.Unlock()
 	b, i := pMap.mp.Remove(peerIP, pEntry)
 	if i != 0 {
 		// Transient case, there is more than one endpoint that is using the same IP
@@ -335,8 +335,8 @@ func (d *driver) deleteNeighbor(nid string, peerIP netip.Prefix, peerMac net.Har
 func (d *driver) peerFlush(nid string) {
 	d.peerOpMu.Lock()
 	defer d.peerOpMu.Unlock()
-	d.peerDb.Lock()
-	defer d.peerDb.Unlock()
+	d.peerDb.mu.Lock()
+	defer d.peerDb.mu.Unlock()
 	_, ok := d.peerDb.mp[nid]
 	if !ok {
 		log.G(context.TODO()).Warnf("Peer flush operation failed: unable to find the peerDB for nid:%s", nid)