From: Bui Quang Minh <minhquangbui99@gmail.com>
To: netdev@vger.kernel.org
Cc: "Michael S. Tsirkin" <mst@redhat.com>,
"Jason Wang" <jasowang@redhat.com>,
"Xuan Zhuo" <xuanzhuo@linux.alibaba.com>,
"Eugenio Pérez" <eperezma@redhat.com>,
"Andrew Lunn" <andrew+netdev@lunn.ch>,
"David S. Miller" <davem@davemloft.net>,
"Eric Dumazet" <edumazet@google.com>,
"Jakub Kicinski" <kuba@kernel.org>,
"Paolo Abeni" <pabeni@redhat.com>,
"Alexei Starovoitov" <ast@kernel.org>,
"Daniel Borkmann" <daniel@iogearbox.net>,
"Jesper Dangaard Brouer" <hawk@kernel.org>,
"John Fastabend" <john.fastabend@gmail.com>,
"Stanislav Fomichev" <sdf@fomichev.me>,
virtualization@lists.linux.dev, linux-kernel@vger.kernel.org,
bpf@vger.kernel.org, "Bui Quang Minh" <minhquangbui99@gmail.com>
Subject: [PATCH net v2 2/3] virtio-net: remove unused delayed refill worker
Date: Fri, 2 Jan 2026 22:20:22 +0700 [thread overview]
Message-ID: <20260102152023.10773-3-minhquangbui99@gmail.com> (raw)
In-Reply-To: <20260102152023.10773-1-minhquangbui99@gmail.com>
Since we changed to retrying the receive buffer refill in NAPI poll
instead of in the delayed worker, remove all the now-unused delayed
refill worker code.
Signed-off-by: Bui Quang Minh <minhquangbui99@gmail.com>
---
drivers/net/virtio_net.c | 86 ----------------------------------------
1 file changed, 86 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ac514c9383ae..7e77a05b5662 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -441,9 +441,6 @@ struct virtnet_info {
/* Packet virtio header size */
u8 hdr_len;
- /* Work struct for delayed refilling if we run low on memory. */
- struct delayed_work refill;
-
/* UDP tunnel support */
bool tx_tnl;
@@ -451,12 +448,6 @@ struct virtnet_info {
bool rx_tnl_csum;
- /* Is delayed refill enabled? */
- bool refill_enabled;
-
- /* The lock to synchronize the access to refill_enabled */
- spinlock_t refill_lock;
-
/* Work struct for config space updates */
struct work_struct config_work;
@@ -720,20 +711,6 @@ static void virtnet_rq_free_buf(struct virtnet_info *vi,
put_page(virt_to_head_page(buf));
}
-static void enable_delayed_refill(struct virtnet_info *vi)
-{
- spin_lock_bh(&vi->refill_lock);
- vi->refill_enabled = true;
- spin_unlock_bh(&vi->refill_lock);
-}
-
-static void disable_delayed_refill(struct virtnet_info *vi)
-{
- spin_lock_bh(&vi->refill_lock);
- vi->refill_enabled = false;
- spin_unlock_bh(&vi->refill_lock);
-}
-
static void enable_rx_mode_work(struct virtnet_info *vi)
{
rtnl_lock();
@@ -2948,42 +2925,6 @@ static void virtnet_napi_disable(struct receive_queue *rq)
napi_disable(napi);
}
-static void refill_work(struct work_struct *work)
-{
- struct virtnet_info *vi =
- container_of(work, struct virtnet_info, refill.work);
- bool still_empty;
- int i;
-
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- struct receive_queue *rq = &vi->rq[i];
-
- /*
- * When queue API support is added in the future and the call
- * below becomes napi_disable_locked, this driver will need to
- * be refactored.
- *
- * One possible solution would be to:
- * - cancel refill_work with cancel_delayed_work (note:
- * non-sync)
- * - cancel refill_work with cancel_delayed_work_sync in
- * virtnet_remove after the netdev is unregistered
- * - wrap all of the work in a lock (perhaps the netdev
- * instance lock)
- * - check netif_running() and return early to avoid a race
- */
- napi_disable(&rq->napi);
- still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
- virtnet_napi_do_enable(rq->vq, &rq->napi);
-
- /* In theory, this can happen: if we don't get any buffers in
- * we will *never* try to fill again.
- */
- if (still_empty)
- schedule_delayed_work(&vi->refill, HZ/2);
- }
-}
-
static int virtnet_receive_xsk_bufs(struct virtnet_info *vi,
struct receive_queue *rq,
int budget,
@@ -3222,8 +3163,6 @@ static int virtnet_open(struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int i, err;
- enable_delayed_refill(vi);
-
for (i = 0; i < vi->max_queue_pairs; i++) {
if (i < vi->curr_queue_pairs)
/* If this fails, we will retry later in
@@ -3249,9 +3188,6 @@ static int virtnet_open(struct net_device *dev)
return 0;
err_enable_qp:
- disable_delayed_refill(vi);
- cancel_delayed_work_sync(&vi->refill);
-
for (i--; i >= 0; i--) {
virtnet_disable_queue_pair(vi, i);
virtnet_cancel_dim(vi, &vi->rq[i].dim);
@@ -3445,24 +3381,12 @@ static void virtnet_rx_pause_all(struct virtnet_info *vi)
{
int i;
- /*
- * Make sure refill_work does not run concurrently to
- * avoid napi_disable race which leads to deadlock.
- */
- disable_delayed_refill(vi);
- cancel_delayed_work_sync(&vi->refill);
for (i = 0; i < vi->max_queue_pairs; i++)
__virtnet_rx_pause(vi, &vi->rq[i]);
}
static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
{
- /*
- * Make sure refill_work does not run concurrently to
- * avoid napi_disable race which leads to deadlock.
- */
- disable_delayed_refill(vi);
- cancel_delayed_work_sync(&vi->refill);
__virtnet_rx_pause(vi, rq);
}
@@ -3486,7 +3410,6 @@ static void virtnet_rx_resume_all(struct virtnet_info *vi)
{
int i;
- enable_delayed_refill(vi);
for (i = 0; i < vi->max_queue_pairs; i++) {
if (i < vi->curr_queue_pairs)
__virtnet_rx_resume(vi, &vi->rq[i], true);
@@ -3497,7 +3420,6 @@ static void virtnet_rx_resume_all(struct virtnet_info *vi)
static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
{
- enable_delayed_refill(vi);
__virtnet_rx_resume(vi, rq, true);
}
@@ -3848,10 +3770,6 @@ static int virtnet_close(struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int i;
- /* Make sure NAPI doesn't schedule refill work */
- disable_delayed_refill(vi);
- /* Make sure refill_work doesn't re-enable napi! */
- cancel_delayed_work_sync(&vi->refill);
/* Prevent the config change callback from changing carrier
* after close
*/
@@ -5807,7 +5725,6 @@ static int virtnet_restore_up(struct virtio_device *vdev)
virtio_device_ready(vdev);
- enable_delayed_refill(vi);
enable_rx_mode_work(vi);
if (netif_running(vi->dev)) {
@@ -6564,7 +6481,6 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
if (!vi->rq)
goto err_rq;
- INIT_DELAYED_WORK(&vi->refill, refill_work);
for (i = 0; i < vi->max_queue_pairs; i++) {
vi->rq[i].pages = NULL;
netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll,
@@ -6906,7 +6822,6 @@ static int virtnet_probe(struct virtio_device *vdev)
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
- spin_lock_init(&vi->refill_lock);
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
vi->mergeable_rx_bufs = true;
@@ -7170,7 +7085,6 @@ static int virtnet_probe(struct virtio_device *vdev)
net_failover_destroy(vi->failover);
free_vqs:
virtio_reset_device(vdev);
- cancel_delayed_work_sync(&vi->refill);
free_receive_page_frags(vi);
virtnet_del_vqs(vi);
free:
--
2.43.0
next prev parent reply other threads:[~2026-01-02 15:20 UTC|newest]
Thread overview: 19+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-02 15:20 [PATCH net v2 0/3] virtio-net: fix the deadlock when disabling rx NAPI Bui Quang Minh
2026-01-02 15:20 ` [PATCH net v2 1/3] virtio-net: don't schedule delayed refill worker Bui Quang Minh
2026-01-03 0:16 ` Michael S. Tsirkin
2026-01-03 16:57 ` Michael S. Tsirkin
2026-01-03 17:34 ` Michael S. Tsirkin
2026-01-04 4:33 ` Bui Quang Minh
2026-01-04 13:24 ` Michael S. Tsirkin
2026-01-04 6:09 ` Jason Wang
2026-01-04 8:34 ` Bui Quang Minh
2026-01-04 14:03 ` Michael S. Tsirkin
2026-01-04 14:54 ` Bui Quang Minh
2026-01-04 15:12 ` Michael S. Tsirkin
2026-01-05 15:03 ` Bui Quang Minh
2026-01-05 20:35 ` Michael S. Tsirkin
2026-01-04 13:25 ` Michael S. Tsirkin
2026-01-02 15:20 ` Bui Quang Minh [this message]
2026-01-03 9:09 ` [PATCH net v2 2/3] virtio-net: remove unused " Michael S. Tsirkin
2026-01-02 15:20 ` [PATCH net v2 3/3] virtio-net: clean up __virtnet_rx_pause/resume Bui Quang Minh
2026-01-03 9:13 ` [PATCH net v2 0/3] virtio-net: fix the deadlock when disabling rx NAPI Michael S. Tsirkin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260102152023.10773-3-minhquangbui99@gmail.com \
--to=minhquangbui99@gmail.com \
--cc=andrew+netdev@lunn.ch \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=eperezma@redhat.com \
--cc=hawk@kernel.org \
--cc=jasowang@redhat.com \
--cc=john.fastabend@gmail.com \
--cc=kuba@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mst@redhat.com \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=sdf@fomichev.me \
--cc=virtualization@lists.linux.dev \
--cc=xuanzhuo@linux.alibaba.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).