* [PATCH net-next v3 0/6] Remove RTNL lock protection of CVQ
@ 2024-04-12 19:53 Daniel Jurgens
2024-04-12 19:53 ` [PATCH net-next v3 1/6] virtio_net: Store RSS setting in virtnet_info Daniel Jurgens
` (5 more replies)
0 siblings, 6 replies; 13+ messages in thread
From: Daniel Jurgens @ 2024-04-12 19:53 UTC (permalink / raw)
To: netdev
Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
pabeni, jiri, Daniel Jurgens
Currently the buffer used for control VQ commands is protected by the
RTNL lock. Previously this wasn't a major concern because the control
VQ was only used during device setup and user interaction. With the
recent addition of dynamic interrupt moderation the control VQ may be
used frequently during normal operation.
This series removes the RTNL lock dependency by introducing a spin lock
to protect the control buffer and writing SGs to the control VQ.
v3:
- Changed type of _offloads to __virtio16 to fix static
analysis warning.
- Moved a misplaced hunk to the correct patch.
v2:
- New patch to only process the provided queue in
virtnet_dim_work
- New patch to lock per queue rx coalescing structure.
Daniel Jurgens (6):
virtio_net: Store RSS setting in virtnet_info
virtio_net: Remove command data from control_buf
virtio_net: Add a lock for the command VQ.
virtio_net: Do DIM update for specified queue only
virtio_net: Add a lock for per queue RX coalesce
virtio_net: Remove rtnl lock protection of command buffers
drivers/net/virtio_net.c | 243 +++++++++++++++++++++------------------
1 file changed, 134 insertions(+), 109 deletions(-)
--
2.42.0
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH net-next v3 1/6] virtio_net: Store RSS setting in virtnet_info
2024-04-12 19:53 [PATCH net-next v3 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
@ 2024-04-12 19:53 ` Daniel Jurgens
2024-04-12 19:53 ` [PATCH net-next v3 2/6] virtio_net: Remove command data from control_buf Daniel Jurgens
` (4 subsequent siblings)
5 siblings, 0 replies; 13+ messages in thread
From: Daniel Jurgens @ 2024-04-12 19:53 UTC (permalink / raw)
To: netdev
Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
pabeni, jiri, Daniel Jurgens
Stop storing RSS setting in the control buffer. This is prep work for
removing RTNL lock protection of the control buffer.
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
---
drivers/net/virtio_net.c | 40 ++++++++++++++++++++--------------------
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 115c3c5414f2..7248dae54e1c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -245,7 +245,6 @@ struct control_buf {
u8 allmulti;
__virtio16 vid;
__virtio64 offloads;
- struct virtio_net_ctrl_rss rss;
struct virtio_net_ctrl_coal_tx coal_tx;
struct virtio_net_ctrl_coal_rx coal_rx;
struct virtio_net_ctrl_coal_vq coal_vq;
@@ -287,6 +286,7 @@ struct virtnet_info {
u16 rss_indir_table_size;
u32 rss_hash_types_supported;
u32 rss_hash_types_saved;
+ struct virtio_net_ctrl_rss rss;
/* Has control virtqueue */
bool has_cvq;
@@ -3087,17 +3087,17 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi)
sg_init_table(sgs, 4);
sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
- sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
+ sg_set_buf(&sgs[0], &vi->rss, sg_buf_size);
- sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
- sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
+ sg_buf_size = sizeof(uint16_t) * (vi->rss.indirection_table_mask + 1);
+ sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size);
sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
- offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
- sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
+ sg_set_buf(&sgs[2], &vi->rss.max_tx_vq, sg_buf_size);
sg_buf_size = vi->rss_key_size;
- sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
+ sg_set_buf(&sgs[3], vi->rss.key, sg_buf_size);
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
@@ -3113,21 +3113,21 @@ static void virtnet_init_default_rss(struct virtnet_info *vi)
u32 indir_val = 0;
int i = 0;
- vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
+ vi->rss.hash_types = vi->rss_hash_types_supported;
vi->rss_hash_types_saved = vi->rss_hash_types_supported;
- vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
+ vi->rss.indirection_table_mask = vi->rss_indir_table_size
? vi->rss_indir_table_size - 1 : 0;
- vi->ctrl->rss.unclassified_queue = 0;
+ vi->rss.unclassified_queue = 0;
for (; i < vi->rss_indir_table_size; ++i) {
indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
- vi->ctrl->rss.indirection_table[i] = indir_val;
+ vi->rss.indirection_table[i] = indir_val;
}
- vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
- vi->ctrl->rss.hash_key_length = vi->rss_key_size;
+ vi->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
+ vi->rss.hash_key_length = vi->rss_key_size;
- netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
+ netdev_rss_key_fill(vi->rss.key, vi->rss_key_size);
}
static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
@@ -3238,7 +3238,7 @@ static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *
if (new_hashtypes != vi->rss_hash_types_saved) {
vi->rss_hash_types_saved = new_hashtypes;
- vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+ vi->rss.hash_types = vi->rss_hash_types_saved;
if (vi->dev->features & NETIF_F_RXHASH)
return virtnet_commit_rss_command(vi);
}
@@ -3791,11 +3791,11 @@ static int virtnet_get_rxfh(struct net_device *dev,
if (rxfh->indir) {
for (i = 0; i < vi->rss_indir_table_size; ++i)
- rxfh->indir[i] = vi->ctrl->rss.indirection_table[i];
+ rxfh->indir[i] = vi->rss.indirection_table[i];
}
if (rxfh->key)
- memcpy(rxfh->key, vi->ctrl->rss.key, vi->rss_key_size);
+ memcpy(rxfh->key, vi->rss.key, vi->rss_key_size);
rxfh->hfunc = ETH_RSS_HASH_TOP;
@@ -3819,7 +3819,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
return -EOPNOTSUPP;
for (i = 0; i < vi->rss_indir_table_size; ++i)
- vi->ctrl->rss.indirection_table[i] = rxfh->indir[i];
+ vi->rss.indirection_table[i] = rxfh->indir[i];
update = true;
}
@@ -3831,7 +3831,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
if (!vi->has_rss && !vi->has_rss_hash_report)
return -EOPNOTSUPP;
- memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size);
+ memcpy(vi->rss.key, rxfh->key, vi->rss_key_size);
update = true;
}
@@ -4156,9 +4156,9 @@ static int virtnet_set_features(struct net_device *dev,
if ((dev->features ^ features) & NETIF_F_RXHASH) {
if (features & NETIF_F_RXHASH)
- vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+ vi->rss.hash_types = vi->rss_hash_types_saved;
else
- vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
+ vi->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
if (!virtnet_commit_rss_command(vi))
return -EINVAL;
--
2.42.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH net-next v3 2/6] virtio_net: Remove command data from control_buf
2024-04-12 19:53 [PATCH net-next v3 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
2024-04-12 19:53 ` [PATCH net-next v3 1/6] virtio_net: Store RSS setting in virtnet_info Daniel Jurgens
@ 2024-04-12 19:53 ` Daniel Jurgens
2024-04-12 19:53 ` [PATCH net-next v3 3/6] virtio_net: Add a lock for the command VQ Daniel Jurgens
` (3 subsequent siblings)
5 siblings, 0 replies; 13+ messages in thread
From: Daniel Jurgens @ 2024-04-12 19:53 UTC (permalink / raw)
To: netdev
Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
pabeni, jiri, Daniel Jurgens
Allocate memory for the data when it's used. Ideally this could be on the
stack, but we can't DMA stack memory. With this change only the header
and status memory are shared between commands, which will allow using a
tighter lock than RTNL.
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
---
drivers/net/virtio_net.c | 111 ++++++++++++++++++++++++++-------------
1 file changed, 75 insertions(+), 36 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7248dae54e1c..0ee192b45e1e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -240,14 +240,6 @@ struct virtio_net_ctrl_rss {
struct control_buf {
struct virtio_net_ctrl_hdr hdr;
virtio_net_ctrl_ack status;
- struct virtio_net_ctrl_mq mq;
- u8 promisc;
- u8 allmulti;
- __virtio16 vid;
- __virtio64 offloads;
- struct virtio_net_ctrl_coal_tx coal_tx;
- struct virtio_net_ctrl_coal_rx coal_rx;
- struct virtio_net_ctrl_coal_vq coal_vq;
};
struct virtnet_info {
@@ -2672,14 +2664,19 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
+ struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
struct scatterlist sg;
struct net_device *dev = vi->dev;
if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
return 0;
- vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
- sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
+ mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+ if (!mq)
+ return -ENOMEM;
+
+ mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
+ sg_init_one(&sg, mq, sizeof(*mq));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
@@ -2708,6 +2705,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
static int virtnet_close(struct net_device *dev)
{
+ u8 *promisc_allmulti __free(kfree) = NULL;
struct virtnet_info *vi = netdev_priv(dev);
int i;
@@ -2732,6 +2730,7 @@ static void virtnet_rx_mode_work(struct work_struct *work)
struct scatterlist sg[2];
struct virtio_net_ctrl_mac *mac_data;
struct netdev_hw_addr *ha;
+ u8 *promisc_allmulti;
int uc_count;
int mc_count;
void *buf;
@@ -2743,22 +2742,27 @@ static void virtnet_rx_mode_work(struct work_struct *work)
rtnl_lock();
- vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
- vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
+ promisc_allmulti = kzalloc(sizeof(*promisc_allmulti), GFP_ATOMIC);
+ if (!promisc_allmulti) {
+ dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n");
+ return;
+ }
- sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
+ *promisc_allmulti = !!(dev->flags & IFF_PROMISC);
+ sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_PROMISC, sg))
dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
- vi->ctrl->promisc ? "en" : "dis");
+ *promisc_allmulti ? "en" : "dis");
- sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
+ *promisc_allmulti = !!(dev->flags & IFF_ALLMULTI);
+ sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
- vi->ctrl->allmulti ? "en" : "dis");
+ *promisc_allmulti ? "en" : "dis");
netif_addr_lock_bh(dev);
@@ -2819,10 +2823,15 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
+ __virtio16 *_vid __free(kfree) = NULL;
struct scatterlist sg;
- vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
- sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
+ _vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
+ if (!_vid)
+ return -ENOMEM;
+
+ *_vid = cpu_to_virtio16(vi->vdev, vid);
+ sg_init_one(&sg, _vid, sizeof(*_vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_ADD, &sg))
@@ -2834,10 +2843,15 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
+ __virtio16 *_vid __free(kfree) = NULL;
struct scatterlist sg;
- vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
- sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
+ _vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
+ if (!_vid)
+ return -ENOMEM;
+
+ *_vid = cpu_to_virtio16(vi->vdev, vid);
+ sg_init_one(&sg, _vid, sizeof(*_vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_DEL, &sg))
@@ -2950,12 +2964,17 @@ static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
u16 vqn, u32 max_usecs, u32 max_packets)
{
+ struct virtio_net_ctrl_coal_vq *coal_vq __free(kfree) = NULL;
struct scatterlist sgs;
- vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
- vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
- vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
- sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
+ coal_vq = kzalloc(sizeof(*coal_vq), GFP_KERNEL);
+ if (!coal_vq)
+ return -ENOMEM;
+
+ coal_vq->vqn = cpu_to_le16(vqn);
+ coal_vq->coal.max_usecs = cpu_to_le32(max_usecs);
+ coal_vq->coal.max_packets = cpu_to_le32(max_packets);
+ sg_init_one(&sgs, coal_vq, sizeof(*coal_vq));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
@@ -3101,11 +3120,15 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi)
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
- : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
- dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
- return false;
- }
+ : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs))
+ goto err;
+
return true;
+
+err:
+ dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
+ return false;
+
}
static void virtnet_init_default_rss(struct virtnet_info *vi)
@@ -3410,12 +3433,17 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
struct ethtool_coalesce *ec)
{
+ struct virtio_net_ctrl_coal_tx *coal_tx __free(kfree) = NULL;
struct scatterlist sgs_tx;
int i;
- vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
- vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
- sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
+ coal_tx = kzalloc(sizeof(*coal_tx), GFP_KERNEL);
+ if (!coal_tx)
+ return -ENOMEM;
+
+ coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+ coal_tx->tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+ sg_init_one(&sgs_tx, coal_tx, sizeof(*coal_tx));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
@@ -3435,6 +3463,7 @@ static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
struct ethtool_coalesce *ec)
{
+ struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
struct scatterlist sgs_rx;
int i;
@@ -3453,6 +3482,10 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
return 0;
}
+ coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
+ if (!coal_rx)
+ return -ENOMEM;
+
if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
vi->rx_dim_enabled = false;
for (i = 0; i < vi->max_queue_pairs; i++)
@@ -3463,9 +3496,9 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
* we need apply the global new params even if they
* are not updated.
*/
- vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
- vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
- sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
+ coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+ coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+ sg_init_one(&sgs_rx, coal_rx, sizeof(*coal_rx));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
@@ -3951,10 +3984,16 @@ static int virtnet_restore_up(struct virtio_device *vdev)
static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
{
+ __virtio64 *_offloads __free(kfree) = NULL;
struct scatterlist sg;
- vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
- sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
+ _offloads = kzalloc(sizeof(*_offloads), GFP_KERNEL);
+ if (!_offloads)
+ return -ENOMEM;
+
+ *_offloads = cpu_to_virtio64(vi->vdev, offloads);
+
+ sg_init_one(&sg, _offloads, sizeof(*_offloads));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
--
2.42.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH net-next v3 3/6] virtio_net: Add a lock for the command VQ.
2024-04-12 19:53 [PATCH net-next v3 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
2024-04-12 19:53 ` [PATCH net-next v3 1/6] virtio_net: Store RSS setting in virtnet_info Daniel Jurgens
2024-04-12 19:53 ` [PATCH net-next v3 2/6] virtio_net: Remove command data from control_buf Daniel Jurgens
@ 2024-04-12 19:53 ` Daniel Jurgens
2024-04-12 19:53 ` [PATCH net-next v3 4/6] virtio_net: Do DIM update for specified queue only Daniel Jurgens
` (2 subsequent siblings)
5 siblings, 0 replies; 13+ messages in thread
From: Daniel Jurgens @ 2024-04-12 19:53 UTC (permalink / raw)
To: netdev
Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
pabeni, jiri, Daniel Jurgens
The command VQ will no longer be protected by the RTNL lock. Use a
spinlock to protect the control buffer header and the VQ.
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
---
drivers/net/virtio_net.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 0ee192b45e1e..d02f83a919a7 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -282,6 +282,7 @@ struct virtnet_info {
/* Has control virtqueue */
bool has_cvq;
+ spinlock_t cvq_lock;
/* Host can handle any s/g split between our header and packet data */
bool any_header_sg;
@@ -2529,6 +2530,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
/* Caller should know better */
BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
+ guard(spinlock)(&vi->cvq_lock);
vi->ctrl->status = ~0;
vi->ctrl->hdr.class = class;
vi->ctrl->hdr.cmd = cmd;
@@ -4818,8 +4820,10 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
vi->any_header_sg = true;
- if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
vi->has_cvq = true;
+ spin_lock_init(&vi->cvq_lock);
+ }
if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
mtu = virtio_cread16(vdev,
--
2.42.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH net-next v3 4/6] virtio_net: Do DIM update for specified queue only
2024-04-12 19:53 [PATCH net-next v3 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
` (2 preceding siblings ...)
2024-04-12 19:53 ` [PATCH net-next v3 3/6] virtio_net: Add a lock for the command VQ Daniel Jurgens
@ 2024-04-12 19:53 ` Daniel Jurgens
2024-04-12 19:53 ` [PATCH net-next v3 5/6] virtio_net: Add a lock for per queue RX coalesce Daniel Jurgens
2024-04-12 19:53 ` [PATCH net-next v3 6/6] virtio_net: Remove rtnl lock protection of command buffers Daniel Jurgens
5 siblings, 0 replies; 13+ messages in thread
From: Daniel Jurgens @ 2024-04-12 19:53 UTC (permalink / raw)
To: netdev
Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
pabeni, jiri, Daniel Jurgens
Since we no longer have to hold the RTNL lock here just do updates for
the specified queue.
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
---
drivers/net/virtio_net.c | 40 +++++++++++++++-------------------------
1 file changed, 15 insertions(+), 25 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d02f83a919a7..b3aa4d2a15e9 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -3596,38 +3596,28 @@ static void virtnet_rx_dim_work(struct work_struct *work)
struct virtnet_info *vi = rq->vq->vdev->priv;
struct net_device *dev = vi->dev;
struct dim_cq_moder update_moder;
- int i, qnum, err;
+ int qnum, err;
if (!rtnl_trylock())
return;
- /* Each rxq's work is queued by "net_dim()->schedule_work()"
- * in response to NAPI traffic changes. Note that dim->profile_ix
- * for each rxq is updated prior to the queuing action.
- * So we only need to traverse and update profiles for all rxqs
- * in the work which is holding rtnl_lock.
- */
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- rq = &vi->rq[i];
- dim = &rq->dim;
- qnum = rq - vi->rq;
+ qnum = rq - vi->rq;
- if (!rq->dim_enabled)
- continue;
+ if (!rq->dim_enabled)
+ goto out;
- update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
- if (update_moder.usec != rq->intr_coal.max_usecs ||
- update_moder.pkts != rq->intr_coal.max_packets) {
- err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
- update_moder.usec,
- update_moder.pkts);
- if (err)
- pr_debug("%s: Failed to send dim parameters on rxq%d\n",
- dev->name, qnum);
- dim->state = DIM_START_MEASURE;
- }
+ update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ if (update_moder.usec != rq->intr_coal.max_usecs ||
+ update_moder.pkts != rq->intr_coal.max_packets) {
+ err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
+ update_moder.usec,
+ update_moder.pkts);
+ if (err)
+ pr_debug("%s: Failed to send dim parameters on rxq%d\n",
+ dev->name, qnum);
+ dim->state = DIM_START_MEASURE;
}
-
+out:
rtnl_unlock();
}
--
2.42.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH net-next v3 5/6] virtio_net: Add a lock for per queue RX coalesce
2024-04-12 19:53 [PATCH net-next v3 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
` (3 preceding siblings ...)
2024-04-12 19:53 ` [PATCH net-next v3 4/6] virtio_net: Do DIM update for specified queue only Daniel Jurgens
@ 2024-04-12 19:53 ` Daniel Jurgens
2024-04-13 2:21 ` Jakub Kicinski
2024-04-15 13:41 ` Heng Qi
2024-04-12 19:53 ` [PATCH net-next v3 6/6] virtio_net: Remove rtnl lock protection of command buffers Daniel Jurgens
5 siblings, 2 replies; 13+ messages in thread
From: Daniel Jurgens @ 2024-04-12 19:53 UTC (permalink / raw)
To: netdev
Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
pabeni, jiri, Daniel Jurgens
Once the RTNL locking around the control buffer is removed there can be
contention on the per queue RX interrupt coalescing data. Use a spin
lock per queue.
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
---
drivers/net/virtio_net.c | 23 ++++++++++++++++-------
1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b3aa4d2a15e9..8724caa7c2ed 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -190,6 +190,7 @@ struct receive_queue {
u32 packets_in_napi;
struct virtnet_interrupt_coalesce intr_coal;
+ spinlock_t intr_coal_lock;
/* Chain pages by the private ptr. */
struct page *pages;
@@ -3087,11 +3088,13 @@ static int virtnet_set_ringparam(struct net_device *dev,
return err;
/* The reason is same as the transmit virtqueue reset */
- err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
- vi->intr_coal_rx.max_usecs,
- vi->intr_coal_rx.max_packets);
- if (err)
- return err;
+ scoped_guard(spinlock, &vi->rq[i].intr_coal_lock) {
+ err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
+ vi->intr_coal_rx.max_usecs,
+ vi->intr_coal_rx.max_packets);
+ if (err)
+ return err;
+ }
}
}
@@ -3510,8 +3513,10 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
for (i = 0; i < vi->max_queue_pairs; i++) {
- vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
- vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
+ scoped_guard(spinlock, &vi->rq[i].intr_coal_lock) {
+ vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
+ vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
+ }
}
return 0;
@@ -3542,6 +3547,7 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
u32 max_usecs, max_packets;
int err;
+ guard(spinlock)(&vi->rq[queue].intr_coal_lock);
max_usecs = vi->rq[queue].intr_coal.max_usecs;
max_packets = vi->rq[queue].intr_coal.max_packets;
@@ -3606,6 +3612,7 @@ static void virtnet_rx_dim_work(struct work_struct *work)
if (!rq->dim_enabled)
goto out;
+ guard(spinlock)(&rq->intr_coal_lock);
update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
if (update_moder.usec != rq->intr_coal.max_usecs ||
update_moder.pkts != rq->intr_coal.max_packets) {
@@ -3756,6 +3763,7 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
return -EINVAL;
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
+ guard(spinlock)(&vi->rq[queue].intr_coal_lock);
ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
@@ -4501,6 +4509,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
u64_stats_init(&vi->rq[i].stats.syncp);
u64_stats_init(&vi->sq[i].stats.syncp);
+ spin_lock_init(&vi->rq[i].intr_coal_lock);
}
return 0;
--
2.42.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH net-next v3 6/6] virtio_net: Remove rtnl lock protection of command buffers
2024-04-12 19:53 [PATCH net-next v3 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
` (4 preceding siblings ...)
2024-04-12 19:53 ` [PATCH net-next v3 5/6] virtio_net: Add a lock for per queue RX coalesce Daniel Jurgens
@ 2024-04-12 19:53 ` Daniel Jurgens
5 siblings, 0 replies; 13+ messages in thread
From: Daniel Jurgens @ 2024-04-12 19:53 UTC (permalink / raw)
To: netdev
Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
pabeni, jiri, Daniel Jurgens
The rtnl lock is no longer needed to protect the control buffer and
command VQ.
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
---
drivers/net/virtio_net.c | 27 +++++----------------------
1 file changed, 5 insertions(+), 22 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8724caa7c2ed..8df8585834f0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2658,14 +2658,12 @@ static void virtnet_stats(struct net_device *dev,
static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
- rtnl_lock();
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
- rtnl_unlock();
}
-static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
+static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
struct scatterlist sg;
@@ -2696,16 +2694,6 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
return 0;
}
-static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
-{
- int err;
-
- rtnl_lock();
- err = _virtnet_set_queues(vi, queue_pairs);
- rtnl_unlock();
- return err;
-}
-
static int virtnet_close(struct net_device *dev)
{
u8 *promisc_allmulti __free(kfree) = NULL;
@@ -3311,7 +3299,7 @@ static int virtnet_set_channels(struct net_device *dev,
return -EINVAL;
cpus_read_lock();
- err = _virtnet_set_queues(vi, queue_pairs);
+ err = virtnet_set_queues(vi, queue_pairs);
if (err) {
cpus_read_unlock();
goto err;
@@ -3604,13 +3592,10 @@ static void virtnet_rx_dim_work(struct work_struct *work)
struct dim_cq_moder update_moder;
int qnum, err;
- if (!rtnl_trylock())
- return;
-
qnum = rq - vi->rq;
if (!rq->dim_enabled)
- goto out;
+ return;
guard(spinlock)(&rq->intr_coal_lock);
update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
@@ -3624,8 +3609,6 @@ static void virtnet_rx_dim_work(struct work_struct *work)
dev->name, qnum);
dim->state = DIM_START_MEASURE;
}
-out:
- rtnl_unlock();
}
static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
@@ -4093,7 +4076,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
synchronize_net();
}
- err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
+ err = virtnet_set_queues(vi, curr_qp + xdp_qp);
if (err)
goto err;
netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
@@ -4915,7 +4898,7 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
- _virtnet_set_queues(vi, vi->curr_queue_pairs);
+ virtnet_set_queues(vi, vi->curr_queue_pairs);
/* a random MAC address has been assigned, notify the device.
* We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
--
2.42.0
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [PATCH net-next v3 5/6] virtio_net: Add a lock for per queue RX coalesce
2024-04-12 19:53 ` [PATCH net-next v3 5/6] virtio_net: Add a lock for per queue RX coalesce Daniel Jurgens
@ 2024-04-13 2:21 ` Jakub Kicinski
2024-04-16 3:15 ` Dan Jurgens
2024-04-15 13:41 ` Heng Qi
1 sibling, 1 reply; 13+ messages in thread
From: Jakub Kicinski @ 2024-04-13 2:21 UTC (permalink / raw)
To: Daniel Jurgens
Cc: netdev, mst, jasowang, xuanzhuo, virtualization, davem, edumazet,
pabeni, jiri
On Fri, 12 Apr 2024 14:53:08 -0500 Daniel Jurgens wrote:
> Once the RTNL locking around the control buffer is removed there can be
> contention on the per queue RX interrupt coalescing data. Use a spin
> lock per queue.
Does not compile on Clang.
> + scoped_guard(spinlock, &vi->rq[i].intr_coal_lock) {
> + err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
> + vi->intr_coal_rx.max_usecs,
> + vi->intr_coal_rx.max_packets);
> + if (err)
> + return err;
> + }
Do you really think this needs a scoped guard and 4th indentation level,
instead of just:
..lock(..);
err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
vi->intr_coal_rx.max_usecs,
vi->intr_coal_rx.max_packets);
..unlock(..);
if (err)
return err;
> + scoped_guard(spinlock, &vi->rq[i].intr_coal_lock) {
> + vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
> + vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
> + }
:-|
--
pw-bot: cr
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH net-next v3 5/6] virtio_net: Add a lock for per queue RX coalesce
2024-04-12 19:53 ` [PATCH net-next v3 5/6] virtio_net: Add a lock for per queue RX coalesce Daniel Jurgens
2024-04-13 2:21 ` Jakub Kicinski
@ 2024-04-15 13:41 ` Heng Qi
2024-04-15 20:24 ` Dan Jurgens
1 sibling, 1 reply; 13+ messages in thread
From: Heng Qi @ 2024-04-15 13:41 UTC (permalink / raw)
To: Daniel Jurgens, netdev
Cc: mst, jasowang, xuanzhuo, virtualization, davem, edumazet, kuba,
pabeni, jiri
在 2024/4/13 上午3:53, Daniel Jurgens 写道:
> Once the RTNL locking around the control buffer is removed there can be
> contention on the per queue RX interrupt coalescing data. Use a spin
> lock per queue.
>
> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> ---
> drivers/net/virtio_net.c | 23 ++++++++++++++++-------
> 1 file changed, 16 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index b3aa4d2a15e9..8724caa7c2ed 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -190,6 +190,7 @@ struct receive_queue {
> u32 packets_in_napi;
>
> struct virtnet_interrupt_coalesce intr_coal;
> + spinlock_t intr_coal_lock;
>
> /* Chain pages by the private ptr. */
> struct page *pages;
> @@ -3087,11 +3088,13 @@ static int virtnet_set_ringparam(struct net_device *dev,
> return err;
>
> /* The reason is same as the transmit virtqueue reset */
> - err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
> - vi->intr_coal_rx.max_usecs,
> - vi->intr_coal_rx.max_packets);
> - if (err)
> - return err;
> + scoped_guard(spinlock, &vi->rq[i].intr_coal_lock) {
> + err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
> + vi->intr_coal_rx.max_usecs,
> + vi->intr_coal_rx.max_packets);
> + if (err)
> + return err;
> + }
> }
> }
>
> @@ -3510,8 +3513,10 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
> vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
> vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
> for (i = 0; i < vi->max_queue_pairs; i++) {
> - vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
> - vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
> + scoped_guard(spinlock, &vi->rq[i].intr_coal_lock) {
> + vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
> + vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
> + }
> }
>
> return 0;
> @@ -3542,6 +3547,7 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
> u32 max_usecs, max_packets;
> int err;
>
> + guard(spinlock)(&vi->rq[queue].intr_coal_lock);
> max_usecs = vi->rq[queue].intr_coal.max_usecs;
> max_packets = vi->rq[queue].intr_coal.max_packets;
>
> @@ -3606,6 +3612,7 @@ static void virtnet_rx_dim_work(struct work_struct *work)
> if (!rq->dim_enabled)
> goto out;
We should also protect the rq->dim_enabled access; incorrect values may be
read in rx_dim_worker because it is modified in
set_coalesce/set_per_queue_coalesce.
Thanks.
>
> + guard(spinlock)(&rq->intr_coal_lock);
> update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
> if (update_moder.usec != rq->intr_coal.max_usecs ||
> update_moder.pkts != rq->intr_coal.max_packets) {
> @@ -3756,6 +3763,7 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
> return -EINVAL;
>
> if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
> + guard(spinlock)(&vi->rq[queue].intr_coal_lock);
> ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
> ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
> ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
> @@ -4501,6 +4509,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
>
> u64_stats_init(&vi->rq[i].stats.syncp);
> u64_stats_init(&vi->sq[i].stats.syncp);
> + spin_lock_init(&vi->rq[i].intr_coal_lock);
> }
>
> return 0;
^ permalink raw reply [flat|nested] 13+ messages in thread
* RE: [PATCH net-next v3 5/6] virtio_net: Add a lock for per queue RX coalesce
2024-04-15 13:41 ` Heng Qi
@ 2024-04-15 20:24 ` Dan Jurgens
0 siblings, 0 replies; 13+ messages in thread
From: Dan Jurgens @ 2024-04-15 20:24 UTC (permalink / raw)
To: Heng Qi, netdev@vger.kernel.org
Cc: mst@redhat.com, jasowang@redhat.com, xuanzhuo@linux.alibaba.com,
virtualization@lists.linux.dev, davem@davemloft.net,
edumazet@google.com, kuba@kernel.org, pabeni@redhat.com,
Jiri Pirko
> From: Heng Qi <hengqi@linux.alibaba.com>
> Sent: Monday, April 15, 2024 8:42 AM
> To: Dan Jurgens <danielj@nvidia.com>; netdev@vger.kernel.org
> Cc: mst@redhat.com; jasowang@redhat.com; xuanzhuo@linux.alibaba.com;
> virtualization@lists.linux.dev; davem@davemloft.net;
> edumazet@google.com; kuba@kernel.org; pabeni@redhat.com; Jiri Pirko
> <jiri@nvidia.com>
> Subject: Re: [PATCH net-next v3 5/6] virtio_net: Add a lock for per queue RX
> coalesce
>
>
>
> 在 2024/4/13 上午3:53, Daniel Jurgens 写道:
> > Once the RTNL locking around the control buffer is removed there can
> > be contention on the per queue RX interrupt coalescing data. Use a
> > spin lock per queue.
> >
> > Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> > ---
> > drivers/net/virtio_net.c | 23 ++++++++++++++++-------
> > 1 file changed, 16 insertions(+), 7 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index
> > b3aa4d2a15e9..8724caa7c2ed 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -190,6 +190,7 @@ struct receive_queue {
> > u32 packets_in_napi;
> >
> > struct virtnet_interrupt_coalesce intr_coal;
> > + spinlock_t intr_coal_lock;
> >
> > /* Chain pages by the private ptr. */
> > struct page *pages;
> > @@ -3087,11 +3088,13 @@ static int virtnet_set_ringparam(struct
> net_device *dev,
> > return err;
> >
> > /* The reason is same as the transmit virtqueue reset
> */
> > - err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
> > - vi-
> >intr_coal_rx.max_usecs,
> > - vi-
> >intr_coal_rx.max_packets);
> > - if (err)
> > - return err;
> > + scoped_guard(spinlock, &vi->rq[i].intr_coal_lock) {
> > + err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
> > + vi-
> >intr_coal_rx.max_usecs,
> > + vi-
> >intr_coal_rx.max_packets);
> > + if (err)
> > + return err;
> > + }
> > }
> > }
> >
> > @@ -3510,8 +3513,10 @@ static int
> virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
> > vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
> > vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
> > for (i = 0; i < vi->max_queue_pairs; i++) {
> > - vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
> > - vi->rq[i].intr_coal.max_packets = ec-
> >rx_max_coalesced_frames;
> > + scoped_guard(spinlock, &vi->rq[i].intr_coal_lock) {
> > + vi->rq[i].intr_coal.max_usecs = ec-
> >rx_coalesce_usecs;
> > + vi->rq[i].intr_coal.max_packets = ec-
> >rx_max_coalesced_frames;
> > + }
> > }
> >
> > return 0;
> > @@ -3542,6 +3547,7 @@ static int
> virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
> > u32 max_usecs, max_packets;
> > int err;
> >
> > + guard(spinlock)(&vi->rq[queue].intr_coal_lock);
> > max_usecs = vi->rq[queue].intr_coal.max_usecs;
> > max_packets = vi->rq[queue].intr_coal.max_packets;
> >
> > @@ -3606,6 +3612,7 @@ static void virtnet_rx_dim_work(struct
> work_struct *work)
> > if (!rq->dim_enabled)
> > goto out;
>
> We should also protect rq->dim_enabled access, incorrect values may be
> read in rx_dim_worker because it is modified in
> set_coalesce/set_per_queue_coalesce.
Good point. Thanks
>
> Thanks.
>
> >
> > + guard(spinlock)(&rq->intr_coal_lock);
> > update_moder = net_dim_get_rx_moderation(dim->mode, dim-
> >profile_ix);
> > if (update_moder.usec != rq->intr_coal.max_usecs ||
> > update_moder.pkts != rq->intr_coal.max_packets) { @@ -3756,6
> > +3763,7 @@ static int virtnet_get_per_queue_coalesce(struct net_device
> *dev,
> > return -EINVAL;
> >
> > if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
> > + guard(spinlock)(&vi->rq[queue].intr_coal_lock);
> > ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
> > ec->tx_coalesce_usecs = vi-
> >sq[queue].intr_coal.max_usecs;
> > ec->tx_max_coalesced_frames = vi-
> >sq[queue].intr_coal.max_packets;
> > @@ -4501,6 +4509,7 @@ static int virtnet_alloc_queues(struct
> > virtnet_info *vi)
> >
> > u64_stats_init(&vi->rq[i].stats.syncp);
> > u64_stats_init(&vi->sq[i].stats.syncp);
> > + spin_lock_init(&vi->rq[i].intr_coal_lock);
> > }
> >
> > return 0;
^ permalink raw reply [flat|nested] 13+ messages in thread
* RE: [PATCH net-next v3 5/6] virtio_net: Add a lock for per queue RX coalesce
2024-04-13 2:21 ` Jakub Kicinski
@ 2024-04-16 3:15 ` Dan Jurgens
2024-04-16 14:27 ` Jakub Kicinski
0 siblings, 1 reply; 13+ messages in thread
From: Dan Jurgens @ 2024-04-16 3:15 UTC (permalink / raw)
To: Jakub Kicinski
Cc: netdev@vger.kernel.org, mst@redhat.com, jasowang@redhat.com,
xuanzhuo@linux.alibaba.com, virtualization@lists.linux.dev,
davem@davemloft.net, edumazet@google.com, pabeni@redhat.com,
Jiri Pirko
> From: Jakub Kicinski <kuba@kernel.org>
> Sent: Friday, April 12, 2024 9:21 PM
> To: Dan Jurgens <danielj@nvidia.com>
> Cc: netdev@vger.kernel.org; mst@redhat.com; jasowang@redhat.com;
> xuanzhuo@linux.alibaba.com; virtualization@lists.linux.dev;
> davem@davemloft.net; edumazet@google.com; pabeni@redhat.com; Jiri
> Pirko <jiri@nvidia.com>
> Subject: Re: [PATCH net-next v3 5/6] virtio_net: Add a lock for per queue RX
> coalesce
>
> On Fri, 12 Apr 2024 14:53:08 -0500 Daniel Jurgens wrote:
> > Once the RTNL locking around the control buffer is removed there can
> > be contention on the per queue RX interrupt coalescing data. Use a
> > spin lock per queue.
>
> Does not compile on Clang.
Which version? It compiles for me with:
$ clang -v
clang version 15.0.7 (Fedora 15.0.7-2.fc37)
>
> > + scoped_guard(spinlock, &vi->rq[i].intr_coal_lock) {
> > + err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
> > + vi-
> >intr_coal_rx.max_usecs,
> > + vi-
> >intr_coal_rx.max_packets);
> > + if (err)
> > + return err;
> > + }
>
> Do you really think this needs a scoped guard and 4th indentation level,
> instead of just:
>
> ..lock(..);
> err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
> vi-
> >intr_coal_rx.max_usecs,
> vi-
> >intr_coal_rx.max_packets);
> ..unlock(..);
I'll change it in the next version.
> if (err)
> return err;
>
> > + scoped_guard(spinlock, &vi->rq[i].intr_coal_lock) {
> > + vi->rq[i].intr_coal.max_usecs = ec-
> >rx_coalesce_usecs;
> > + vi->rq[i].intr_coal.max_packets = ec-
> >rx_max_coalesced_frames;
> > + }
>
> :-|
> --
> pw-bot: cr
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH net-next v3 5/6] virtio_net: Add a lock for per queue RX coalesce
2024-04-16 3:15 ` Dan Jurgens
@ 2024-04-16 14:27 ` Jakub Kicinski
2024-04-16 19:24 ` Dan Jurgens
0 siblings, 1 reply; 13+ messages in thread
From: Jakub Kicinski @ 2024-04-16 14:27 UTC (permalink / raw)
To: Dan Jurgens
Cc: netdev@vger.kernel.org, mst@redhat.com, jasowang@redhat.com,
xuanzhuo@linux.alibaba.com, virtualization@lists.linux.dev,
davem@davemloft.net, edumazet@google.com, pabeni@redhat.com,
Jiri Pirko
On Tue, 16 Apr 2024 03:15:34 +0000 Dan Jurgens wrote:
> Which version? It compiles for me with:
> $ clang -v
> clang version 15.0.7 (Fedora 15.0.7-2.fc37)
clang version 17.0.6 (Fedora 17.0.6-2.fc39)
allmodconfig
The combination of UNIQUE() goto and guard seems to make it unhappy:
../drivers/net/virtio_net.c:3613:3: error: cannot jump from this goto
statement to its label 3613 | goto out; | ^
../drivers/net/virtio_net.c:3615:2: note: jump bypasses initialization of variable with __attribute__((cleanup))
3615 | guard(spinlock)(&rq->intr_coal_lock);
| ^
../include/linux/cleanup.h:164:15: note: expanded from macro 'guard'
164 | CLASS(_name, __UNIQUE_ID(guard))
| ^
../include/linux/compiler.h:189:29: note: expanded from macro '__UNIQUE_ID'
189 | #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
| ^
./../include/linux/compiler_types.h:84:22: note: expanded from macro '__PASTE'
84 | #define __PASTE(a,b) ___PASTE(a,b)
| ^
./../include/linux/compiler_types.h:83:23: note: expanded from macro '___PASTE'
83 | #define ___PASTE(a,b) a##b
| ^
<scratch space>:18:1: note: expanded from here
18 | __UNIQUE_ID_guard2044
| ^
1 error generated.
^ permalink raw reply [flat|nested] 13+ messages in thread
* RE: [PATCH net-next v3 5/6] virtio_net: Add a lock for per queue RX coalesce
2024-04-16 14:27 ` Jakub Kicinski
@ 2024-04-16 19:24 ` Dan Jurgens
0 siblings, 0 replies; 13+ messages in thread
From: Dan Jurgens @ 2024-04-16 19:24 UTC (permalink / raw)
To: Jakub Kicinski
Cc: netdev@vger.kernel.org, mst@redhat.com, jasowang@redhat.com,
xuanzhuo@linux.alibaba.com, virtualization@lists.linux.dev,
davem@davemloft.net, edumazet@google.com, pabeni@redhat.com,
Jiri Pirko
> From: Jakub Kicinski <kuba@kernel.org>
> Sent: Tuesday, April 16, 2024 9:27 AM
> To: Dan Jurgens <danielj@nvidia.com>
> On Tue, 16 Apr 2024 03:15:34 +0000 Dan Jurgens wrote:
> > Which version? It compiles for me with:
> > $ clang -v
> > clang version 15.0.7 (Fedora 15.0.7-2.fc37)
>
> clang version 17.0.6 (Fedora 17.0.6-2.fc39)
>
Thanks, I was able to see this with the newer version. The changes to address Heng's comment resolve it as well.
> allmodconfig
>
> The combination of UNIQUE() goto and guard seems to make it unhappy:
>
> ../drivers/net/virtio_net.c:3613:3: error: cannot jump from this goto
> statement to its label 3613 | goto out; | ^
> ../drivers/net/virtio_net.c:3615:2: note: jump bypasses initialization of
> variable with __attribute__((cleanup))
> 3615 | guard(spinlock)(&rq->intr_coal_lock);
> | ^
> ../include/linux/cleanup.h:164:15: note: expanded from macro 'guard'
> 164 | CLASS(_name, __UNIQUE_ID(guard))
> | ^
> ../include/linux/compiler.h:189:29: note: expanded from macro
> '__UNIQUE_ID'
> 189 | #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_,
> prefix), __COUNTER__)
> | ^
> ./../include/linux/compiler_types.h:84:22: note: expanded from macro
> '__PASTE'
> 84 | #define __PASTE(a,b) ___PASTE(a,b)
> | ^
> ./../include/linux/compiler_types.h:83:23: note: expanded from macro
> '___PASTE'
> 83 | #define ___PASTE(a,b) a##b
> | ^
> <scratch space>:18:1: note: expanded from here
> 18 | __UNIQUE_ID_guard2044
> | ^
> 1 error generated.
^ permalink raw reply [flat|nested] 13+ messages in thread
end of thread, other threads:[~2024-04-16 19:24 UTC | newest]
Thread overview: 13+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2024-04-12 19:53 [PATCH net-next v3 0/6] Remove RTNL lock protection of CVQ Daniel Jurgens
2024-04-12 19:53 ` [PATCH net-next v3 1/6] virtio_net: Store RSS setting in virtnet_info Daniel Jurgens
2024-04-12 19:53 ` [PATCH net-next v3 2/6] virtio_net: Remove command data from control_buf Daniel Jurgens
2024-04-12 19:53 ` [PATCH net-next v3 3/6] virtio_net: Add a lock for the command VQ Daniel Jurgens
2024-04-12 19:53 ` [PATCH net-next v3 4/6] virtio_net: Do DIM update for specified queue only Daniel Jurgens
2024-04-12 19:53 ` [PATCH net-next v3 5/6] virtio_net: Add a lock for per queue RX coalesce Daniel Jurgens
2024-04-13 2:21 ` Jakub Kicinski
2024-04-16 3:15 ` Dan Jurgens
2024-04-16 14:27 ` Jakub Kicinski
2024-04-16 19:24 ` Dan Jurgens
2024-04-15 13:41 ` Heng Qi
2024-04-15 20:24 ` Dan Jurgens
2024-04-12 19:53 ` [PATCH net-next v3 6/6] virtio_net: Remove rtnl lock protection of command buffers Daniel Jurgens
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).