From: Daniel Jurgens <danielj@nvidia.com>
To: <netdev@vger.kernel.org>
Cc: <mst@redhat.com>, <jasowang@redhat.com>,
	<xuanzhuo@linux.alibaba.com>, <virtualization@lists.linux.dev>,
	<davem@davemloft.net>, <edumazet@google.com>, <kuba@kernel.org>,
	<pabeni@redhat.com>, <jiri@nvidia.com>,
	Daniel Jurgens <danielj@nvidia.com>
Subject: [PATCH net-next v2 5/6] virtio_net: Add a lock for per queue RX coalesce
Date: Thu, 28 Mar 2024 06:47:14 +0200	[thread overview]
Message-ID: <20240328044715.266641-6-danielj@nvidia.com> (raw)
In-Reply-To: <20240328044715.266641-1-danielj@nvidia.com>

Once the RTNL locking around the control buffer is removed there can be
contention on the per-queue RX interrupt coalescing data. Protect it
with a per-queue spinlock.
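
The patch uses the scope-based locking helpers from <linux/cleanup.h>
instead of explicit spin_lock()/spin_unlock() pairs. As a minimal
illustrative sketch only (the two helper functions below are
hypothetical and not part of this patch; struct receive_queue and its
intr_coal/intr_coal_lock fields are as in the driver):

	#include <linux/spinlock.h>
	#include <linux/cleanup.h>

	/* Lock is held only for the braced block and released at the
	 * closing brace, equivalent to spin_lock()/spin_unlock().
	 */
	static void example_set_coal(struct receive_queue *rq,
				     u32 usecs, u32 pkts)
	{
		scoped_guard(spinlock, &rq->intr_coal_lock) {
			rq->intr_coal.max_usecs = usecs;
			rq->intr_coal.max_packets = pkts;
		}
	}

	/* Lock is held for the rest of the enclosing scope and dropped
	 * automatically on every return path, so early returns (as in
	 * virtnet_set_ringparam() below) stay correct.
	 */
	static u32 example_get_usecs(struct receive_queue *rq)
	{
		guard(spinlock)(&rq->intr_coal_lock);
		return rq->intr_coal.max_usecs;
	}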

Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
---
 drivers/net/virtio_net.c | 27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9c4bfb1eb15c..859d767411f8 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -190,6 +190,7 @@ struct receive_queue {
 	u32 packets_in_napi;
 
 	struct virtnet_interrupt_coalesce intr_coal;
+	spinlock_t intr_coal_lock;
 
 	/* Chain pages by the private ptr. */
 	struct page *pages;
@@ -3087,11 +3088,13 @@ static int virtnet_set_ringparam(struct net_device *dev,
 				return err;
 
 			/* The reason is same as the transmit virtqueue reset */
-			err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
-							       vi->intr_coal_rx.max_usecs,
-							       vi->intr_coal_rx.max_packets);
-			if (err)
-				return err;
+			scoped_guard(spinlock, &vi->rq[i].intr_coal_lock) {
+				err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
+								       vi->intr_coal_rx.max_usecs,
+								       vi->intr_coal_rx.max_packets);
+				if (err)
+					return err;
+			}
 		}
 	}
 
@@ -3510,8 +3513,10 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 	vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
 	vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
 	for (i = 0; i < vi->max_queue_pairs; i++) {
-		vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
-		vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
+		scoped_guard(spinlock, &vi->rq[i].intr_coal_lock) {
+			vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
+			vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
+		}
 	}
 
 	return 0;
@@ -3542,6 +3547,7 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
 	u32 max_usecs, max_packets;
 	int err;
 
+	guard(spinlock)(&vi->rq[queue].intr_coal_lock);
 	max_usecs = vi->rq[queue].intr_coal.max_usecs;
 	max_packets = vi->rq[queue].intr_coal.max_packets;
 
@@ -3604,8 +3610,9 @@ static void virtnet_rx_dim_work(struct work_struct *work)
 	qnum = rq - vi->rq;
 
 	if (!rq->dim_enabled)
-		continue;
+		goto out;
 
+	guard(spinlock)(&rq->intr_coal_lock);
 	update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
 	if (update_moder.usec != rq->intr_coal.max_usecs ||
 	    update_moder.pkts != rq->intr_coal.max_packets) {
@@ -3617,7 +3624,7 @@ static void virtnet_rx_dim_work(struct work_struct *work)
 				 dev->name, qnum);
 		dim->state = DIM_START_MEASURE;
 	}
-
+out:
 	rtnl_unlock();
 }
 
@@ -3756,6 +3763,7 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
 		return -EINVAL;
 
 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
+		guard(spinlock)(&vi->rq[queue].intr_coal_lock);
 		ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
 		ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
 		ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
@@ -4485,6 +4493,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 
 		u64_stats_init(&vi->rq[i].stats.syncp);
 		u64_stats_init(&vi->sq[i].stats.syncp);
+		spin_lock_init(&vi->rq[i].intr_coal_lock);
 	}
 
 	return 0;
-- 
2.42.0


