From: John Fastabend <john.fastabend@gmail.com>
To: kubakici@wp.pl, jasowang@redhat.com, ast@fb.com, mst@redhat.com
Cc: john.r.fastabend@intel.com, netdev@vger.kernel.org,
john.fastabend@gmail.com
Subject: [net-next PATCH 1/5] virtio_net: wrap rtnl_lock in test for calling with lock already held
Date: Thu, 02 Feb 2017 15:20:31 -0800
Message-ID: <20170202232031.14366.69142.stgit@john-Precision-Tower-5810>
In-Reply-To: <20170202231404.14366.64298.stgit@john-Precision-Tower-5810>

For the XDP use case, and to allow ethtool reset tests, it is useful
to be able to use the reset paths from contexts where the rtnl lock
is already held.

This requires updating virtnet_set_queues and free_receive_bufs, the
two places where rtnl_lock is taken in virtio_net. To do this we use
the following pattern:

	_foo(...) { do stuff }
	foo(...) { rtnl_lock(); _foo(...); rtnl_unlock(); }

This allows us to use the freeze()/restore() flow from both contexts.
Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
---
drivers/net/virtio_net.c | 31 +++++++++++++++++++++----------
1 file changed, 21 insertions(+), 10 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bd22cf3..f8ba586 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1342,7 +1342,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
rtnl_unlock();
}
-static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
+static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
struct scatterlist sg;
struct net_device *dev = vi->dev;
@@ -1368,6 +1368,16 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
return 0;
}
+static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
+{
+ int err;
+
+ rtnl_lock();
+ err = _virtnet_set_queues(vi, queue_pairs);
+ rtnl_unlock();
+ return err;
+}
+
static int virtnet_close(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -1620,7 +1630,7 @@ static int virtnet_set_channels(struct net_device *dev,
return -EINVAL;
get_online_cpus();
- err = virtnet_set_queues(vi, queue_pairs);
+ err = _virtnet_set_queues(vi, queue_pairs);
if (!err) {
netif_set_real_num_tx_queues(dev, queue_pairs);
netif_set_real_num_rx_queues(dev, queue_pairs);
@@ -1752,7 +1762,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
return -ENOMEM;
}
- err = virtnet_set_queues(vi, curr_qp + xdp_qp);
+ err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
if (err) {
dev_warn(&dev->dev, "XDP Device queue allocation failure.\n");
return err;
@@ -1761,7 +1771,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
if (prog) {
prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
if (IS_ERR(prog)) {
- virtnet_set_queues(vi, curr_qp);
+ _virtnet_set_queues(vi, curr_qp);
return PTR_ERR(prog);
}
}
@@ -1880,12 +1890,11 @@ static void virtnet_free_queues(struct virtnet_info *vi)
kfree(vi->sq);
}
-static void free_receive_bufs(struct virtnet_info *vi)
+static void _free_receive_bufs(struct virtnet_info *vi)
{
struct bpf_prog *old_prog;
int i;
- rtnl_lock();
for (i = 0; i < vi->max_queue_pairs; i++) {
while (vi->rq[i].pages)
__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
@@ -1895,6 +1904,12 @@ static void free_receive_bufs(struct virtnet_info *vi)
if (old_prog)
bpf_prog_put(old_prog);
}
+}
+
+static void free_receive_bufs(struct virtnet_info *vi)
+{
+ rtnl_lock();
+ _free_receive_bufs(vi);
rtnl_unlock();
}
@@ -2333,9 +2348,7 @@ static int virtnet_probe(struct virtio_device *vdev)
goto free_unregister_netdev;
}
- rtnl_lock();
virtnet_set_queues(vi, vi->curr_queue_pairs);
- rtnl_unlock();
/* Assume link up if device can't report link status,
otherwise get link status from config. */
@@ -2444,9 +2457,7 @@ static int virtnet_restore(struct virtio_device *vdev)
netif_device_attach(vi->dev);
- rtnl_lock();
virtnet_set_queues(vi, vi->curr_queue_pairs);
- rtnl_unlock();
err = virtnet_cpu_notif_add(vi);
if (err)