From: syzbot <syzbot+b668da2bc4cb9670bf58@syzkaller.appspotmail.com>
To: linux-kernel@vger.kernel.org
Subject: Re: [syzbot] Re: [syzbot] [net?] possible deadlock in team_device_event (3)
Date: Fri, 02 Aug 2024 07:20:58 -0700
Message-ID: <000000000000413882061eb4096f@google.com>
In-Reply-To: <0000000000004da3b0061808451e@google.com>
For archival purposes, forwarding an incoming command email to
linux-kernel@vger.kernel.org.
***
Subject: Re: [syzbot] [net?] possible deadlock in team_device_event (3)
Author: aha310510@gmail.com
#syz test git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
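
The "possible deadlock" in the subject refers to a lockdep lock-ordering
report involving team->lock, not an observed hang. The patch below simply
deletes every mutex_lock(&team->lock) / mutex_unlock(&team->lock) pair from
the flagged paths. As a rough standalone illustration of the AB-BA shape
such reports describe (a userspace sketch with hypothetical lock names, not
the actual call chains from the report):

/* abba.c - sketch of the AB-BA ordering lockdep flags.
 * Build with: gcc -pthread -o abba abba.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* say, team->lock */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* another lock class */

static void *path_one(void *arg)        /* acquires A, then B */
{
        pthread_mutex_lock(&lock_a);
        pthread_mutex_lock(&lock_b);
        pthread_mutex_unlock(&lock_b);
        pthread_mutex_unlock(&lock_a);
        return arg;
}

static void *path_two(void *arg)        /* acquires B, then A: the inversion */
{
        pthread_mutex_lock(&lock_b);
        pthread_mutex_lock(&lock_a);
        pthread_mutex_unlock(&lock_a);
        pthread_mutex_unlock(&lock_b);
        return arg;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, path_one, NULL);
        pthread_create(&t2, NULL, path_two, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        puts("done (an unlucky interleaving deadlocks before this line)");
        return 0;
}

Lockdep flags the cycle as soon as it has seen both acquisition orders, even
if no hang ever happens. Deleting one side's lock/unlock pairs, as the patch
does, removes an edge from that cycle, at the price of relying on some other
serialization (presumably RTNL) for the lists the mutex used to protect.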
---
drivers/net/team/team_core.c | 22 ----------------------
1 file changed, 22 deletions(-)
diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
index ab1935a4aa2c..f7bab2d2a281 100644
--- a/drivers/net/team/team_core.c
+++ b/drivers/net/team/team_core.c
@@ -1668,7 +1668,6 @@ static void team_uninit(struct net_device *dev)
         struct team_port *port;
         struct team_port *tmp;
 
-        mutex_lock(&team->lock);
         list_for_each_entry_safe(port, tmp, &team->port_list, list)
                 team_port_del(team, port->dev);
 
@@ -1677,7 +1676,6 @@ static void team_uninit(struct net_device *dev)
         team_mcast_rejoin_fini(team);
         team_notify_peers_fini(team);
         team_queue_override_fini(team);
-        mutex_unlock(&team->lock);
         netdev_change_features(dev);
         lockdep_unregister_key(&team->team_lock_key);
 }
@@ -1818,7 +1816,6 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
          * Alhough this is reader, it's guarded by team lock. It's not possible
          * to traverse list in reverse under rcu_read_lock
          */
-        mutex_lock(&team->lock);
         team->port_mtu_change_allowed = true;
         list_for_each_entry(port, &team->port_list, list) {
                 err = dev_set_mtu(port->dev, new_mtu);
@@ -1829,7 +1826,6 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
                 }
         }
         team->port_mtu_change_allowed = false;
-        mutex_unlock(&team->lock);
 
         WRITE_ONCE(dev->mtu, new_mtu);
 
@@ -1839,7 +1835,6 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
         list_for_each_entry_continue_reverse(port, &team->port_list, list)
                 dev_set_mtu(port->dev, dev->mtu);
         team->port_mtu_change_allowed = false;
-        mutex_unlock(&team->lock);
 
         return err;
 }
@@ -1893,20 +1888,17 @@ static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
          * Alhough this is reader, it's guarded by team lock. It's not possible
          * to traverse list in reverse under rcu_read_lock
          */
-        mutex_lock(&team->lock);
         list_for_each_entry(port, &team->port_list, list) {
                 err = vlan_vid_add(port->dev, proto, vid);
                 if (err)
                         goto unwind;
         }
-        mutex_unlock(&team->lock);
 
         return 0;
 
 unwind:
         list_for_each_entry_continue_reverse(port, &team->port_list, list)
                 vlan_vid_del(port->dev, proto, vid);
-        mutex_unlock(&team->lock);
 
         return err;
 }
@@ -1916,10 +1908,8 @@ static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
         struct team *team = netdev_priv(dev);
         struct team_port *port;
 
-        mutex_lock(&team->lock);
         list_for_each_entry(port, &team->port_list, list)
                 vlan_vid_del(port->dev, proto, vid);
-        mutex_unlock(&team->lock);
 
         return 0;
 }
@@ -1941,9 +1931,7 @@ static void team_netpoll_cleanup(struct net_device *dev)
 {
         struct team *team = netdev_priv(dev);
 
-        mutex_lock(&team->lock);
         __team_netpoll_cleanup(team);
-        mutex_unlock(&team->lock);
 }
 
 static int team_netpoll_setup(struct net_device *dev,
@@ -1953,7 +1941,6 @@ static int team_netpoll_setup(struct net_device *dev,
         struct team_port *port;
         int err = 0;
 
-        mutex_lock(&team->lock);
         list_for_each_entry(port, &team->port_list, list) {
                 err = __team_port_enable_netpoll(port);
                 if (err) {
@@ -1961,7 +1948,6 @@ static int team_netpoll_setup(struct net_device *dev,
                         break;
                 }
         }
-        mutex_unlock(&team->lock);
         return err;
 }
 #endif
@@ -1972,9 +1958,7 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
         struct team *team = netdev_priv(dev);
         int err;
 
-        mutex_lock(&team->lock);
         err = team_port_add(team, port_dev, extack);
-        mutex_unlock(&team->lock);
 
         if (!err)
                 netdev_change_features(dev);
@@ -1987,9 +1971,7 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
         struct team *team = netdev_priv(dev);
         int err;
 
-        mutex_lock(&team->lock);
         err = team_port_del(team, port_dev);
-        mutex_unlock(&team->lock);
 
         if (err)
                 return err;
@@ -2945,11 +2927,7 @@ static void __team_port_change_port_removed(struct team_port *port)
 }
 
 static void team_port_change_check(struct team_port *port, bool linkup)
 {
-        struct team *team = port->team;
-
-        mutex_lock(&team->lock);
         __team_port_change_check(port, linkup);
-        mutex_unlock(&team->lock);
 }
--
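
If this test comes back clean, the open question is whether every path
touched above is already serialized by rtnl_lock(). One way to make that
assumption self-checking is the kernel's existing ASSERT_RTNL() macro from
<linux/rtnetlink.h>. A sketch (not part of the patch actually posted) of
how the opening of one touched function would then look:

static int team_change_mtu(struct net_device *dev, int new_mtu)
{
        struct team *team = netdev_priv(dev);
        struct team_port *port;
        int err;

        ASSERT_RTNL();  /* WARNs if the caller does not hold rtnl_lock() */

        team->port_mtu_change_allowed = true;
        /* ... remainder exactly as in the patch above ... */

The same one-line assertion would fit team_vlan_rx_add_vid(),
team_add_slave(), team_del_slave() and the other entry points that lost
their mutex above.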
Thread overview: 13+ messages
2024-05-09 16:54 [syzbot] [net?] possible deadlock in team_device_event (3) syzbot
2024-07-26 18:36 ` syzbot
2024-07-30 11:23 ` [syzbot] " syzbot
2024-07-30 12:02 ` syzbot
2024-07-30 12:41 ` syzbot
2024-07-31 12:18 ` syzbot
2024-07-31 13:20 ` syzbot
2024-07-31 14:16 ` syzbot
2024-08-01 9:57 ` syzbot
2024-08-02 14:20 ` syzbot [this message]
2025-06-27 13:23 ` Tetsuo Handa
2025-06-27 13:43 ` syzbot
2025-06-27 13:47 ` Tetsuo Handa